// Copyright 2010 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.

#include "VideoCommon/VertexManagerBase.h"

#include <array>
#include <cmath>
#include <memory>

#include "Common/BitSet.h"
#include "Common/ChunkFile.h"
#include "Common/CommonTypes.h"
#include "Common/Logging/Log.h"
#include "Common/MathUtil.h"

#include "Core/ConfigManager.h"
#include "Core/DolphinAnalytics.h"

#include "VideoCommon/BPMemory.h"
#include "VideoCommon/BoundingBox.h"
#include "VideoCommon/DataReader.h"
#include "VideoCommon/FramebufferManager.h"
#include "VideoCommon/GeometryShaderManager.h"
#include "VideoCommon/IndexGenerator.h"
#include "VideoCommon/NativeVertexFormat.h"
#include "VideoCommon/OpcodeDecoding.h"
#include "VideoCommon/PerfQueryBase.h"
#include "VideoCommon/PixelShaderManager.h"
#include "VideoCommon/RenderBase.h"
#include "VideoCommon/SamplerCommon.h"
#include "VideoCommon/Statistics.h"
#include "VideoCommon/TextureCacheBase.h"
#include "VideoCommon/VertexLoaderManager.h"
#include "VideoCommon/VertexShaderManager.h"
#include "VideoCommon/VideoBackendBase.h"
#include "VideoCommon/VideoCommon.h"
#include "VideoCommon/VideoConfig.h"
#include "VideoCommon/XFMemory.h"

std::unique_ptr<VertexManagerBase> g_vertex_manager;

// GX primitive -> RenderState primitive, no primitive restart
constexpr std::array<PrimitiveType, 8> primitive_from_gx{{
    PrimitiveType::Triangles,  // GX_DRAW_QUADS
    PrimitiveType::Triangles,  // GX_DRAW_QUADS_2
    PrimitiveType::Triangles,  // GX_DRAW_TRIANGLES
    PrimitiveType::Triangles,  // GX_DRAW_TRIANGLE_STRIP
    PrimitiveType::Triangles,  // GX_DRAW_TRIANGLE_FAN
    PrimitiveType::Lines,      // GX_DRAW_LINES
    PrimitiveType::Lines,      // GX_DRAW_LINE_STRIP
    PrimitiveType::Points,     // GX_DRAW_POINTS
}};

// GX primitive -> RenderState primitive, using primitive restart
constexpr std::array<PrimitiveType, 8> primitive_from_gx_pr{{
    PrimitiveType::TriangleStrip,  // GX_DRAW_QUADS
    PrimitiveType::TriangleStrip,  // GX_DRAW_QUADS_2
    PrimitiveType::TriangleStrip,  // GX_DRAW_TRIANGLES
    PrimitiveType::TriangleStrip,  // GX_DRAW_TRIANGLE_STRIP
    PrimitiveType::TriangleStrip,  // GX_DRAW_TRIANGLE_FAN
    PrimitiveType::Lines,          // GX_DRAW_LINES
    PrimitiveType::Lines,          // GX_DRAW_LINE_STRIP
    PrimitiveType::Points,         // GX_DRAW_POINTS
}};

// The BT.601 standard the GameCube is based on is a compromise between PAL and NTSC, so
// neither standard gets square pixels; each is off by ~9% in opposite directions.
// Just in case any game decides to take this into account, we do both these tests with a
// large amount of slop.
static constexpr float ASPECT_RATIO_SLOP = 0.11f;

static bool IsAnamorphicProjection(const Projection::Raw& projection, const Viewport& viewport)
{
  // If the ratio between our projection and viewport aspect ratios is similar to 16:9 / 4:3,
  // we have an anamorphic projection.
  static constexpr float IDEAL_RATIO = (16 / 9.f) / (4 / 3.f);

  const float projection_ar = projection[2] / projection[0];
  const float viewport_ar = viewport.wd / viewport.ht;

  return std::abs(std::abs(projection_ar / viewport_ar) - IDEAL_RATIO) <
         IDEAL_RATIO * ASPECT_RATIO_SLOP;
}

static bool IsNormalProjection(const Projection::Raw& projection, const Viewport& viewport)
{
  const float projection_ar = projection[2] / projection[0];
  const float viewport_ar = viewport.wd / viewport.ht;
  return std::abs(std::abs(projection_ar / viewport_ar) - 1) < ASPECT_RATIO_SLOP;
}

VertexManagerBase::VertexManagerBase()
    : m_cpu_vertex_buffer(MAXVBUFFERSIZE), m_cpu_index_buffer(MAXIBUFFERSIZE)
{
}

VertexManagerBase::~VertexManagerBase() = default;

bool VertexManagerBase::Initialize()
{
  m_index_generator.Init();
  return true;
}

u32 VertexManagerBase::GetRemainingSize() const
{
  return static_cast<u32>(m_end_buffer_pointer - m_cur_buffer_pointer);
}

void VertexManagerBase::AddIndices(int primitive, u32 num_vertices)
{
  m_index_generator.AddIndices(primitive, num_vertices);
}
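
// Returns a DataReader covering the space the vertex loader may write the next batch of
// primitives into. Flushes first when the primitive type changes (different kinds of primitives
// can't be merged into one draw) or when the vertex/index buffers are close to full.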
DataReader VertexManagerBase::PrepareForAdditionalData(int primitive, u32 count, u32 stride,
                                                       bool cullall)
{
  // Flush all EFB pokes. Since the buffer is shared, we can't draw pokes+primitives concurrently.
  g_framebuffer_manager->FlushEFBPokes();

  // The SSE vertex loader can write up to 4 bytes past the end.
  u32 const needed_vertex_bytes = count * stride + 4;

  // We can't merge different kinds of primitives, so we have to flush here.
  PrimitiveType new_primitive_type = g_ActiveConfig.backend_info.bSupportsPrimitiveRestart ?
                                         primitive_from_gx_pr[primitive] :
                                         primitive_from_gx[primitive];
  if (m_current_primitive_type != new_primitive_type)
  {
    Flush();

    // Have to update the rasterization state for point/line cull modes.
    m_current_primitive_type = new_primitive_type;
    SetRasterizationStateChanged();
  }

  // Check for space in the buffers; if either would overflow, flush what we have so far.
  if (!m_is_flushed &&
      (count > m_index_generator.GetRemainingIndices() || count > GetRemainingIndices(primitive) ||
       needed_vertex_bytes > GetRemainingSize()))
  {
    Flush();

    if (count > m_index_generator.GetRemainingIndices())
    {
      ERROR_LOG_FMT(VIDEO, "Too few remaining index values. Use 32-bit or reset them on flush.");
    }
    if (count > GetRemainingIndices(primitive))
    {
      ERROR_LOG_FMT(VIDEO, "VertexManager: Buffer not large enough for all indices! "
                           "Increase MAXIBUFFERSIZE or we need primitive breaking after all.");
    }
    if (needed_vertex_bytes > GetRemainingSize())
    {
      ERROR_LOG_FMT(VIDEO, "VertexManager: Buffer not large enough for all vertices! "
                           "Increase MAXVBUFFERSIZE or we need primitive breaking after all.");
    }
  }

  m_cull_all = cullall;

  // Allocate a new buffer if the previous one was flushed.
  if (m_is_flushed)
  {
    if (cullall)
    {
      // This buffer isn't getting sent to the GPU. Just allocate it on the CPU.
      m_cur_buffer_pointer = m_base_buffer_pointer = m_cpu_vertex_buffer.data();
      m_end_buffer_pointer = m_base_buffer_pointer + m_cpu_vertex_buffer.size();
      m_index_generator.Start(m_cpu_index_buffer.data());
    }
    else
    {
      ResetBuffer(stride);
    }

    m_is_flushed = false;
  }

  return DataReader(m_cur_buffer_pointer, m_end_buffer_pointer);
}

void VertexManagerBase::FlushData(u32 count, u32 stride)
{
  m_cur_buffer_pointer += count * stride;
}
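
// Converts the number of free slots in the index buffer back into how many more vertices can be
// queued for a given GX primitive type. The ratios mirror the expansion done by IndexGenerator:
// e.g. with primitive restart a quad becomes 4 indices plus a restart index (5 per 4 vertices),
// while without it a quad is split into two triangles (6 indices per 4 vertices).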
u32 VertexManagerBase::GetRemainingIndices(int primitive) const
{
  const u32 index_len = MAXIBUFFERSIZE - m_index_generator.GetIndexLen();

  if (g_Config.backend_info.bSupportsPrimitiveRestart)
  {
    switch (primitive)
    {
    case OpcodeDecoder::GX_DRAW_QUADS:
    case OpcodeDecoder::GX_DRAW_QUADS_2:
      return index_len / 5 * 4;
    case OpcodeDecoder::GX_DRAW_TRIANGLES:
      return index_len / 4 * 3;
    case OpcodeDecoder::GX_DRAW_TRIANGLE_STRIP:
      return index_len - 1;
    case OpcodeDecoder::GX_DRAW_TRIANGLE_FAN:
      return index_len / 6 * 4 + 1;

    case OpcodeDecoder::GX_DRAW_LINES:
      return index_len;
    case OpcodeDecoder::GX_DRAW_LINE_STRIP:
      return index_len / 2 + 1;

    case OpcodeDecoder::GX_DRAW_POINTS:
      return index_len;

    default:
      return 0;
    }
  }
  else
  {
    switch (primitive)
    {
    case OpcodeDecoder::GX_DRAW_QUADS:
    case OpcodeDecoder::GX_DRAW_QUADS_2:
      return index_len / 6 * 4;
    case OpcodeDecoder::GX_DRAW_TRIANGLES:
      return index_len;
    case OpcodeDecoder::GX_DRAW_TRIANGLE_STRIP:
      return index_len / 3 + 2;
    case OpcodeDecoder::GX_DRAW_TRIANGLE_FAN:
      return index_len / 3 + 2;

    case OpcodeDecoder::GX_DRAW_LINES:
      return index_len;
    case OpcodeDecoder::GX_DRAW_LINE_STRIP:
      return index_len / 2 + 1;

    case OpcodeDecoder::GX_DRAW_POINTS:
      return index_len;

    default:
      return 0;
    }
  }
}

auto VertexManagerBase::ResetFlushAspectRatioCount() -> FlushStatistics
{
  const auto result = m_flush_statistics;
  m_flush_statistics = {};
  return result;
}

void VertexManagerBase::ResetBuffer(u32 vertex_stride)
{
  m_base_buffer_pointer = m_cpu_vertex_buffer.data();
  m_cur_buffer_pointer = m_cpu_vertex_buffer.data();
  m_end_buffer_pointer = m_base_buffer_pointer + m_cpu_vertex_buffer.size();
  m_index_generator.Start(m_cpu_index_buffer.data());
}
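
// The base implementation only tracks the CPU-side buffers; backends override CommitBuffer() to
// upload the batched vertices/indices to GPU memory and return the base offsets for the draw.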
void VertexManagerBase::CommitBuffer(u32 num_vertices, u32 vertex_stride, u32 num_indices,
                                     u32* out_base_vertex, u32* out_base_index)
{
  *out_base_vertex = 0;
  *out_base_index = 0;
}

void VertexManagerBase::DrawCurrentBatch(u32 base_index, u32 num_indices, u32 base_vertex)
{
  // If bounding box is enabled, we need to flush any changes first, then invalidate what we have.
  if (BoundingBox::IsEnabled() && g_ActiveConfig.bBBoxEnable &&
      g_ActiveConfig.backend_info.bSupportsBBox)
  {
    g_renderer->BBoxFlush();
  }

  g_renderer->DrawIndexed(base_index, num_indices, base_vertex);
}
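
// UploadUniforms() and UploadUtilityUniforms() are no-ops in the base class; backends that manage
// their own constant buffers override them to push the shader manager state to the GPU.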
void VertexManagerBase::UploadUniforms()
{
}

void VertexManagerBase::InvalidateConstants()
{
  VertexShaderManager::dirty = true;
  GeometryShaderManager::dirty = true;
  PixelShaderManager::dirty = true;
}

void VertexManagerBase::UploadUtilityUniforms(const void* uniforms, u32 uniforms_size)
{
}

void VertexManagerBase::UploadUtilityVertices(const void* vertices, u32 vertex_stride,
                                              u32 num_vertices, const u16* indices, u32 num_indices,
                                              u32* out_base_vertex, u32* out_base_index)
{
  // The GX vertex list should be flushed before any utility draws occur.
  ASSERT(m_is_flushed);

  // Copy into the buffers usually used for GX drawing.
  ResetBuffer(std::max(vertex_stride, 1u));
  if (vertices)
  {
    const u32 copy_size = vertex_stride * num_vertices;
    ASSERT((m_cur_buffer_pointer + copy_size) <= m_end_buffer_pointer);
    std::memcpy(m_cur_buffer_pointer, vertices, copy_size);
    m_cur_buffer_pointer += copy_size;
  }
  if (indices)
    m_index_generator.AddExternalIndices(indices, num_indices, num_vertices);

  CommitBuffer(num_vertices, vertex_stride, num_indices, out_base_vertex, out_base_index);
}

u32 VertexManagerBase::GetTexelBufferElementSize(TexelBufferFormat buffer_format)
{
  // R8 - 1, R16 - 2, RGBA8 - 4, R32G32 - 8
  return 1u << static_cast<u32>(buffer_format);
}

bool VertexManagerBase::UploadTexelBuffer(const void* data, u32 data_size, TexelBufferFormat format,
                                          u32* out_offset)
{
  return false;
}

bool VertexManagerBase::UploadTexelBuffer(const void* data, u32 data_size, TexelBufferFormat format,
                                          u32* out_offset, const void* palette_data,
                                          u32 palette_size, TexelBufferFormat palette_format,
                                          u32* palette_offset)
{
  return false;
}
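
// Walks the enabled TEV and indirect stages to find which texture maps are referenced, loads
// them through the texture cache, and binds them for the upcoming draw.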
void VertexManagerBase::LoadTextures()
{
  BitSet32 usedtextures;
  for (u32 i = 0; i < bpmem.genMode.numtevstages + 1u; ++i)
    if (bpmem.tevorders[i / 2].getEnable(i & 1))
      usedtextures[bpmem.tevorders[i / 2].getTexMap(i & 1)] = true;

  if (bpmem.genMode.numindstages > 0)
    for (unsigned int i = 0; i < bpmem.genMode.numtevstages + 1u; ++i)
      if (bpmem.tevind[i].IsActive() && bpmem.tevind[i].bt < bpmem.genMode.numindstages)
        usedtextures[bpmem.tevindref.getTexMap(bpmem.tevind[i].bt)] = true;

  for (unsigned int i : usedtextures)
    g_texture_cache->Load(i);

  g_texture_cache->BindTextures();
}
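
// Submits everything batched since the last flush: validates XF/BP consistency, updates the
// widescreen heuristic statistics, computes the zfreeze depth slope, commits the vertex/index
// buffers, binds textures, uploads uniforms, selects a pipeline and issues the draw.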
void VertexManagerBase::Flush()
{
  if (m_is_flushed)
    return;

  m_is_flushed = true;

  if (xfmem.numTexGen.numTexGens != bpmem.genMode.numtexgens ||
      xfmem.numChan.numColorChans != bpmem.genMode.numcolchans)
  {
    ERROR_LOG_FMT(
        VIDEO,
        "Mismatched configuration between XF and BP stages - {}/{} texgens, {}/{} colors. "
        "Skipping draw. Please report on the issue tracker.",
        xfmem.numTexGen.numTexGens, bpmem.genMode.numtexgens.Value(), xfmem.numChan.numColorChans,
        bpmem.genMode.numcolchans.Value());

    // Analytics reporting so we can discover which games have this problem, so that when we
    // eventually simulate the behavior we have test cases for it.
    if (xfmem.numTexGen.numTexGens != bpmem.genMode.numtexgens)
    {
      DolphinAnalytics::Instance().ReportGameQuirk(
          GameQuirk::MISMATCHED_GPU_TEXGENS_BETWEEN_XF_AND_BP);
    }
    if (xfmem.numChan.numColorChans != bpmem.genMode.numcolchans)
    {
      DolphinAnalytics::Instance().ReportGameQuirk(
          GameQuirk::MISMATCHED_GPU_COLORS_BETWEEN_XF_AND_BP);
    }

    return;
  }

#if defined(_DEBUG) || defined(DEBUGFAST)
  PRIM_LOG("frame{}:\n texgen={}, numchan={}, dualtex={}, ztex={}, cole={}, alpe={}, ze={}",
           g_ActiveConfig.iSaveTargetId, xfmem.numTexGen.numTexGens, xfmem.numChan.numColorChans,
           xfmem.dualTexTrans.enabled, bpmem.ztex2.op.Value(), bpmem.blendmode.colorupdate.Value(),
           bpmem.blendmode.alphaupdate.Value(), bpmem.zmode.updateenable.Value());

  for (u32 i = 0; i < xfmem.numChan.numColorChans; ++i)
  {
    LitChannel* ch = &xfmem.color[i];
    PRIM_LOG("colchan{}: matsrc={}, light={:#x}, ambsrc={}, diffunc={}, attfunc={}", i,
             ch->matsource.Value(), ch->GetFullLightMask(), ch->ambsource.Value(),
             ch->diffusefunc.Value(), ch->attnfunc.Value());
    ch = &xfmem.alpha[i];
    PRIM_LOG("alpchan{}: matsrc={}, light={:#x}, ambsrc={}, diffunc={}, attfunc={}", i,
             ch->matsource.Value(), ch->GetFullLightMask(), ch->ambsource.Value(),
             ch->diffusefunc.Value(), ch->attnfunc.Value());
  }

  for (u32 i = 0; i < xfmem.numTexGen.numTexGens; ++i)
  {
    TexMtxInfo tinfo = xfmem.texMtxInfo[i];
    if (tinfo.texgentype != TexGenType::EmbossMap)
      tinfo.hex &= 0x7ff;
    if (tinfo.texgentype != TexGenType::Regular)
      tinfo.projection = TexSize::ST;

    PRIM_LOG("txgen{}: proj={}, input={}, gentype={}, srcrow={}, embsrc={}, emblght={}, "
             "postmtx={}, postnorm={}",
             i, tinfo.projection.Value(), tinfo.inputform.Value(), tinfo.texgentype.Value(),
             tinfo.sourcerow.Value(), tinfo.embosssourceshift.Value(),
             tinfo.embosslightshift.Value(), xfmem.postMtxInfo[i].index.Value(),
             xfmem.postMtxInfo[i].normalize.Value());
  }

  PRIM_LOG("pixel: tev={}, ind={}, texgen={}, dstalpha={}, alphatest={:#x}",
           bpmem.genMode.numtevstages.Value() + 1, bpmem.genMode.numindstages.Value(),
           bpmem.genMode.numtexgens.Value(), bpmem.dstalpha.enable.Value(),
           (bpmem.alpha_test.hex >> 16) & 0xff);
#endif

  // Track some stats used elsewhere by the anamorphic widescreen heuristic.
  if (!SConfig::GetInstance().bWii)
  {
    const bool is_perspective = xfmem.projection.type == ProjectionType::Perspective;

    auto& counts =
        is_perspective ? m_flush_statistics.perspective : m_flush_statistics.orthographic;

    if (IsAnamorphicProjection(xfmem.projection.rawProjection, xfmem.viewport))
    {
      ++counts.anamorphic_flush_count;
      counts.anamorphic_vertex_count += m_index_generator.GetIndexLen();
    }
    else if (IsNormalProjection(xfmem.projection.rawProjection, xfmem.viewport))
    {
      ++counts.normal_flush_count;
      counts.normal_vertex_count += m_index_generator.GetIndexLen();
    }
    else
    {
      ++counts.other_flush_count;
      counts.other_vertex_count += m_index_generator.GetIndexLen();
    }
  }

  // Calculate ZSlope for zfreeze
  VertexShaderManager::SetConstants();
  if (!bpmem.genMode.zfreeze)
  {
    // Must be done after VertexShaderManager::SetConstants()
    CalculateZSlope(VertexLoaderManager::GetCurrentVertexFormat());
  }
  else if (m_zslope.dirty && !m_cull_all)  // or apply any dirty ZSlopes
  {
    PixelShaderManager::SetZSlope(m_zslope.dfdx, m_zslope.dfdy, m_zslope.f0);
    m_zslope.dirty = false;
  }
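
  // m_cull_all is set when the GX cull mode rejects every primitive; nothing will be rendered,
  // so skip committing vertices, binding textures and issuing the draw below.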
  if (!m_cull_all)
  {
    // Now the vertices can be flushed to the GPU. Everything following the CommitBuffer() call
    // must be careful to not upload any utility vertices, as the binding will be lost otherwise.
    const u32 num_indices = m_index_generator.GetIndexLen();
    u32 base_vertex, base_index;
    CommitBuffer(m_index_generator.GetNumVerts(),
                 VertexLoaderManager::GetCurrentVertexFormat()->GetVertexStride(), num_indices,
                 &base_vertex, &base_index);

    // Texture loading can cause palettes to be applied (-> uniforms -> draws).
    // Palette application does not use vertices, only a full-screen quad, so this is okay.
    // Same with GPU texture decoding, which uses compute shaders.
    LoadTextures();

    // Now we can upload uniforms, as nothing else will override them.
    GeometryShaderManager::SetConstants();
    PixelShaderManager::SetConstants();
    UploadUniforms();

    // Update the pipeline, or compile one if needed.
    UpdatePipelineConfig();
    UpdatePipelineObject();
    if (m_current_pipeline_object)
    {
      g_renderer->SetPipeline(m_current_pipeline_object);
      if (PerfQueryBase::ShouldEmulate())
        g_perf_query->EnableQuery(bpmem.zcontrol.early_ztest ? PQG_ZCOMP_ZCOMPLOC : PQG_ZCOMP);

      DrawCurrentBatch(base_index, num_indices, base_vertex);
      INCSTAT(g_stats.this_frame.num_draw_calls);

      if (PerfQueryBase::ShouldEmulate())
        g_perf_query->DisableQuery(bpmem.zcontrol.early_ztest ? PQG_ZCOMP_ZCOMPLOC : PQG_ZCOMP);

      OnDraw();

      // The EFB cache is now potentially stale.
      g_framebuffer_manager->FlagPeekCacheAsOutOfDate();
    }
  }

  if (xfmem.numTexGen.numTexGens != bpmem.genMode.numtexgens)
  {
    ERROR_LOG_FMT(VIDEO,
                  "xf.numtexgens ({}) does not match bp.numtexgens ({}). Error in command stream.",
                  xfmem.numTexGen.numTexGens, bpmem.genMode.numtexgens.Value());
  }
}

void VertexManagerBase::DoState(PointerWrap& p)
{
  if (p.GetMode() == PointerWrap::MODE_READ)
  {
    // Flush old vertex data before loading state.
    Flush();

    // Clear all caches that touch RAM
    // (? these don't appear to touch any emulation state that gets saved. moved to on load only.)
    VertexLoaderManager::MarkAllDirty();
  }

  p.Do(m_zslope);
}
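
// Reconstructs the depth slope (dZ/dx, dZ/dy and the Z value at the screen-space origin) of the
// most recently queued triangle by software-transforming its three vertices; the result is fed to
// the pixel shader when a later flush runs with zfreeze enabled.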
void VertexManagerBase::CalculateZSlope(NativeVertexFormat* format)
{
  float out[12];
  float viewOffset[2] = {xfmem.viewport.xOrig - bpmem.scissorOffset.x * 2,
                         xfmem.viewport.yOrig - bpmem.scissorOffset.y * 2};

  if (m_current_primitive_type != PrimitiveType::Triangles &&
      m_current_primitive_type != PrimitiveType::TriangleStrip)
  {
    return;
  }

  // Global matrix ID.
  u32 mtxIdx = g_main_cp_state.matrix_index_a.PosNormalMtxIdx;
  const PortableVertexDeclaration vert_decl = format->GetVertexDeclaration();

  // Make sure the buffer contains at least 3 vertices.
  if ((m_cur_buffer_pointer - m_base_buffer_pointer) < (vert_decl.stride * 3))
    return;

  // Look up the vertices of the last rendered triangle and software-transform them.
  // This allows us to determine the depth slope, which will be used if z-freeze
  // is enabled in the following flush.
  for (unsigned int i = 0; i < 3; ++i)
  {
    // If this vertex format has per-vertex position matrix IDs, look it up.
    if (vert_decl.posmtx.enable)
      mtxIdx = VertexLoaderManager::position_matrix_index[3 - i];

    if (vert_decl.position.components == 2)
      VertexLoaderManager::position_cache[2 - i][2] = 0;

    VertexShaderManager::TransformToClipSpace(&VertexLoaderManager::position_cache[2 - i][0],
                                              &out[i * 4], mtxIdx);

    // Transform to screen space.
    float inv_w = 1.0f / out[3 + i * 4];

    out[0 + i * 4] = out[0 + i * 4] * inv_w * xfmem.viewport.wd + viewOffset[0];
    out[1 + i * 4] = out[1 + i * 4] * inv_w * xfmem.viewport.ht + viewOffset[1];
    out[2 + i * 4] = out[2 + i * 4] * inv_w * xfmem.viewport.zRange + xfmem.viewport.farZ;
  }

  float dx31 = out[8] - out[0];
  float dx12 = out[0] - out[4];
  float dy12 = out[1] - out[5];
  float dy31 = out[9] - out[1];

  float DF31 = out[10] - out[2];
  float DF21 = out[6] - out[2];
  float a = DF31 * -dy12 - DF21 * dy31;
  float b = dx31 * DF21 + dx12 * DF31;
  float c = -dx12 * dy31 - dx31 * -dy12;

  // Sometimes we process degenerate triangles. Avoid any divide by zero.
  if (c == 0)
    return;

  m_zslope.dfdx = -a / c;
  m_zslope.dfdy = -b / c;
  m_zslope.f0 = out[2] - (out[0] * m_zslope.dfdx + out[1] * m_zslope.dfdy);
  m_zslope.dirty = true;
}

void VertexManagerBase::UpdatePipelineConfig()
{
  NativeVertexFormat* vertex_format = VertexLoaderManager::GetCurrentVertexFormat();
  if (vertex_format != m_current_pipeline_config.vertex_format)
  {
    m_current_pipeline_config.vertex_format = vertex_format;
    m_current_uber_pipeline_config.vertex_format =
        VertexLoaderManager::GetUberVertexFormat(vertex_format->GetVertexDeclaration());
    m_pipeline_config_changed = true;
  }

  VertexShaderUid vs_uid = GetVertexShaderUid();
  if (vs_uid != m_current_pipeline_config.vs_uid)
  {
    m_current_pipeline_config.vs_uid = vs_uid;
    m_current_uber_pipeline_config.vs_uid = UberShader::GetVertexShaderUid();
    m_pipeline_config_changed = true;
  }

  PixelShaderUid ps_uid = GetPixelShaderUid();
  if (ps_uid != m_current_pipeline_config.ps_uid)
  {
    m_current_pipeline_config.ps_uid = ps_uid;
    m_current_uber_pipeline_config.ps_uid = UberShader::GetPixelShaderUid();
    m_pipeline_config_changed = true;
  }

  GeometryShaderUid gs_uid = GetGeometryShaderUid(GetCurrentPrimitiveType());
  if (gs_uid != m_current_pipeline_config.gs_uid)
  {
    m_current_pipeline_config.gs_uid = gs_uid;
    m_current_uber_pipeline_config.gs_uid = gs_uid;
    m_pipeline_config_changed = true;
  }

  if (m_rasterization_state_changed)
  {
    m_rasterization_state_changed = false;

    RasterizationState new_rs = {};
    new_rs.Generate(bpmem, m_current_primitive_type);
    if (new_rs != m_current_pipeline_config.rasterization_state)
    {
      m_current_pipeline_config.rasterization_state = new_rs;
      m_current_uber_pipeline_config.rasterization_state = new_rs;
      m_pipeline_config_changed = true;
    }
  }

  if (m_depth_state_changed)
  {
    m_depth_state_changed = false;

    DepthState new_ds = {};
    new_ds.Generate(bpmem);
    if (new_ds != m_current_pipeline_config.depth_state)
    {
      m_current_pipeline_config.depth_state = new_ds;
      m_current_uber_pipeline_config.depth_state = new_ds;
      m_pipeline_config_changed = true;
    }
  }

  if (m_blending_state_changed)
  {
    m_blending_state_changed = false;

    BlendingState new_bs = {};
    new_bs.Generate(bpmem);
    if (new_bs != m_current_pipeline_config.blending_state)
    {
      m_current_pipeline_config.blending_state = new_bs;
      m_current_uber_pipeline_config.blending_state = new_bs;
      m_pipeline_config_changed = true;
    }
  }
}
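
// Resolves m_current_pipeline_config (and its ubershader counterpart) to a concrete pipeline
// object according to the configured shader compilation mode; may leave the pointer null while a
// pipeline is still compiling asynchronously.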
void VertexManagerBase::UpdatePipelineObject()
{
  if (!m_pipeline_config_changed)
    return;

  m_current_pipeline_object = nullptr;
  m_pipeline_config_changed = false;

  switch (g_ActiveConfig.iShaderCompilationMode)
  {
  case ShaderCompilationMode::Synchronous:
  {
    // Ubershaders disabled? Block and compile the specialized shader.
    m_current_pipeline_object = g_shader_cache->GetPipelineForUid(m_current_pipeline_config);
  }
  break;

  case ShaderCompilationMode::SynchronousUberShaders:
  {
    // Exclusive ubershader mode, always use ubershaders.
    m_current_pipeline_object =
        g_shader_cache->GetUberPipelineForUid(m_current_uber_pipeline_config);
  }
  break;

  case ShaderCompilationMode::AsynchronousUberShaders:
  case ShaderCompilationMode::AsynchronousSkipRendering:
  {
    // Can we background compile shaders? If so, get the pipeline asynchronously.
    auto res = g_shader_cache->GetPipelineForUidAsync(m_current_pipeline_config);
    if (res)
    {
      // Specialized shaders are ready, prefer these.
      m_current_pipeline_object = *res;
      return;
    }

    if (g_ActiveConfig.iShaderCompilationMode == ShaderCompilationMode::AsynchronousUberShaders)
    {
      // Specialized shaders not ready, use the ubershaders.
      m_current_pipeline_object =
          g_shader_cache->GetUberPipelineForUid(m_current_uber_pipeline_config);
    }
    else
    {
      // Ensure we try again next draw. Otherwise, if no registers change between frames, the
      // object will never be drawn, even when the shader is ready.
      m_pipeline_config_changed = true;
    }
  }
  break;
  }
}
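
// Called after every GX draw; flushes the backend command buffer at the points scheduled by
// OnEndFrame() so that CPU EFB readbacks later in the frame stall for less time.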
void VertexManagerBase::OnDraw()
{
  m_draw_counter++;

  // If we didn't have any CPU access last frame, do nothing.
  if (m_scheduled_command_buffer_kicks.empty() || !m_allow_background_execution)
    return;

  // Check if this draw is scheduled to kick a command buffer.
  // The draw counters will always be sorted so a binary search is possible here.
  if (std::binary_search(m_scheduled_command_buffer_kicks.begin(),
                         m_scheduled_command_buffer_kicks.end(), m_draw_counter))
  {
    // Kick a command buffer on the background thread.
    g_renderer->Flush();
  }
}

void VertexManagerBase::OnCPUEFBAccess()
{
  // Check this isn't another access without any draws in between.
  if (!m_cpu_accesses_this_frame.empty() && m_cpu_accesses_this_frame.back() == m_draw_counter)
    return;

  // Store the current draw counter for scheduling in OnEndFrame.
  m_cpu_accesses_this_frame.emplace_back(m_draw_counter);
}

void VertexManagerBase::OnEFBCopyToRAM()
{
  // If we're not deferring, try to preempt it next frame.
  if (!g_ActiveConfig.bDeferEFBCopies)
  {
    OnCPUEFBAccess();
    return;
  }

  // Otherwise, only execute if we have at least 10 objects between us and the last copy.
  const u32 diff = m_draw_counter - m_last_efb_copy_draw_counter;
  m_last_efb_copy_draw_counter = m_draw_counter;
  if (diff < MINIMUM_DRAW_CALLS_PER_COMMAND_BUFFER_FOR_READBACK)
    return;

  g_renderer->Flush();
}
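
// Converts the CPU EFB accesses recorded this frame into a sorted list of draw counters at which
// OnDraw() should kick the command buffer. For example, with accesses after draws 100 and 600 and
// an execute interval of 250 (both gaps above the minimum), kicks are scheduled at draws 50
// (halfway to the first access) and 350 (one interval into the second gap).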
void VertexManagerBase::OnEndFrame()
{
  m_draw_counter = 0;
  m_last_efb_copy_draw_counter = 0;
  m_scheduled_command_buffer_kicks.clear();

  // If we have no CPU access at all, leave everything in the one command buffer for maximum
  // parallelism between CPU/GPU, at the cost of slightly higher latency.
  if (m_cpu_accesses_this_frame.empty())
    return;

  // In order to reduce CPU readback latency, we want to kick a command buffer roughly halfway
  // between the draw counters that invoked the readback, or every 250 draws, whichever is smaller.
  if (g_ActiveConfig.iCommandBufferExecuteInterval > 0)
  {
    u32 last_draw_counter = 0;
    u32 interval = static_cast<u32>(g_ActiveConfig.iCommandBufferExecuteInterval);
    for (u32 draw_counter : m_cpu_accesses_this_frame)
    {
      // We don't want to waste executing command buffers for only a few draws, so set a minimum.
      // Leave last_draw_counter as-is, so we get the correct number of draws between submissions.
      u32 draw_count = draw_counter - last_draw_counter;
      if (draw_count < MINIMUM_DRAW_CALLS_PER_COMMAND_BUFFER_FOR_READBACK)
        continue;

      if (draw_count <= interval)
      {
        u32 mid_point = draw_count / 2;
        m_scheduled_command_buffer_kicks.emplace_back(last_draw_counter + mid_point);
      }
      else
      {
        u32 counter = interval;
        while (counter < draw_count)
        {
          m_scheduled_command_buffer_kicks.emplace_back(last_draw_counter + counter);
          counter += interval;
        }
      }

      last_draw_counter = draw_counter;
    }
  }

#if 0
  {
    std::ostringstream ss;
    std::for_each(m_cpu_accesses_this_frame.begin(), m_cpu_accesses_this_frame.end(), [&ss](u32 idx) { ss << idx << ","; });
    WARN_LOG_FMT(VIDEO, "CPU EFB accesses in last frame: {}", ss.str());
  }
  {
    std::ostringstream ss;
    std::for_each(m_scheduled_command_buffer_kicks.begin(), m_scheduled_command_buffer_kicks.end(), [&ss](u32 idx) { ss << idx << ","; });
    WARN_LOG_FMT(VIDEO, "Scheduled command buffer kicks: {}", ss.str());
  }
#endif

  m_cpu_accesses_this_frame.clear();
}