// dolphin/Source/Core/VideoCommon/VertexLoaderManager.cpp

// Copyright 2008 Dolphin Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "VideoCommon/VertexLoaderManager.h"
#include <algorithm>
#include <cstring>
#include <iterator>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include "Common/CommonTypes.h"
#include "Common/EnumMap.h"
#include "Common/Logging/Log.h"
#include "Core/DolphinAnalytics.h"
#include "Core/HW/Memmap.h"
#include "Core/System.h"
#include "VideoCommon/AbstractGfx.h"
#include "VideoCommon/BPMemory.h"
#include "VideoCommon/CPMemory.h"
#include "VideoCommon/DataReader.h"
#include "VideoCommon/IndexGenerator.h"
#include "VideoCommon/NativeVertexFormat.h"
#include "VideoCommon/Statistics.h"
#include "VideoCommon/VertexLoaderBase.h"
#include "VideoCommon/VertexManagerBase.h"
#include "VideoCommon/VertexShaderManager.h"
#include "VideoCommon/VideoConfig.h"
#include "VideoCommon/XFMemory.h"
namespace VertexLoaderManager
{
// Used by zfreeze
std::array<u32, 3> position_matrix_index_cache;
// 3 vertices, 4 floats each to allow SIMD overwrite
alignas(sizeof(std::array<float, 4>)) std::array<std::array<float, 4>, 3> position_cache;
alignas(sizeof(std::array<float, 4>)) std::array<float, 4> tangent_cache;
alignas(sizeof(std::array<float, 4>)) std::array<float, 4> binormal_cache;
static NativeVertexFormatMap s_native_vertex_map;
static NativeVertexFormat* s_current_vtx_fmt;
u32 g_current_components;
typedef std::unordered_map<VertexLoaderUID, std::unique_ptr<VertexLoaderBase>> VertexLoaderMap;
static std::mutex s_vertex_loader_map_lock;
static VertexLoaderMap s_vertex_loader_map;
// TODO - change into array of pointers. Keep a map of all seen so far.
Common::EnumMap<u8*, CPArray::TexCoord7> cached_arraybases;
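// Per-VAT-group dirty bits; a set bit means the cached loader pointer for that group may be
// stale and must be re-resolved (see detail::GetOrCreateLoader) before it is used again.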
BitSet8 g_main_vat_dirty;
BitSet8 g_preprocess_vat_dirty;
bool g_bases_dirty; // Main only
std::array<VertexLoaderBase*, CP_NUM_VAT_REG> g_main_vertex_loaders;
std::array<VertexLoaderBase*, CP_NUM_VAT_REG> g_preprocess_vertex_loaders;
bool g_needs_cp_xf_consistency_check;
void Init()
{
MarkAllDirty();
for (auto& map_entry : g_main_vertex_loaders)
map_entry = nullptr;
for (auto& map_entry : g_preprocess_vertex_loaders)
map_entry = nullptr;
SETSTAT(g_stats.num_vertex_loaders, 0);
}
void Clear()
{
std::lock_guard<std::mutex> lk(s_vertex_loader_map_lock);
s_vertex_loader_map.clear();
s_native_vertex_map.clear();
}
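// Refreshes cached_arraybases from the CP array base registers when they have been marked dirty,
// translating the guest addresses into host pointers.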
void UpdateVertexArrayPointers()
{
// Anything to update?
if (!g_bases_dirty) [[likely]]
return;
auto& system = Core::System::GetInstance();
auto& memory = system.GetMemory();
// Some games such as Burnout 2 can put invalid addresses into
// the array base registers. (see issue 8591)
// But the vertex arrays with invalid addresses aren't actually enabled.
// Note: Only array bases 0 through 11 are used by the Vertex loaders.
// 12 through 15 are used for loading data into xfmem.
// We also only update the array base if the vertex description states we are going to use it.
if (IsIndexed(g_main_cp_state.vtx_desc.low.Position))
cached_arraybases[CPArray::Position] =
memory.GetPointer(g_main_cp_state.array_bases[CPArray::Position]);
if (IsIndexed(g_main_cp_state.vtx_desc.low.Normal))
cached_arraybases[CPArray::Normal] =
memory.GetPointer(g_main_cp_state.array_bases[CPArray::Normal]);
for (u8 i = 0; i < g_main_cp_state.vtx_desc.low.Color.Size(); i++)
{
if (IsIndexed(g_main_cp_state.vtx_desc.low.Color[i]))
cached_arraybases[CPArray::Color0 + i] =
memory.GetPointer(g_main_cp_state.array_bases[CPArray::Color0 + i]);
}
for (u8 i = 0; i < g_main_cp_state.vtx_desc.high.TexCoord.Size(); i++)
{
if (IsIndexed(g_main_cp_state.vtx_desc.high.TexCoord[i]))
cached_arraybases[CPArray::TexCoord0 + i] =
memory.GetPointer(g_main_cp_state.array_bases[CPArray::TexCoord0 + i]);
}
g_bases_dirty = false;
}
namespace
{
struct entry
{
std::string text;
u64 num_verts;
bool operator<(const entry& other) const { return num_verts > other.num_verts; }
};
} // namespace
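// Invalidates all cached CP-derived state so that loaders, array bases and the CP/XF
// consistency check are re-evaluated on next use.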
void MarkAllDirty()
{
g_bases_dirty = true;
g_main_vat_dirty = BitSet8::AllTrue(8);
g_preprocess_vat_dirty = BitSet8::AllTrue(8);
g_needs_cp_xf_consistency_check = true;
}
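// Returns the backend NativeVertexFormat for the given declaration, creating and caching it on
// first use so that identical declarations share a single format object.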
NativeVertexFormat* GetOrCreateMatchingFormat(const PortableVertexDeclaration& decl)
{
auto iter = s_native_vertex_map.find(decl);
if (iter == s_native_vertex_map.end())
{
std::unique_ptr<NativeVertexFormat> fmt = g_gfx->CreateNativeVertexFormat(decl);
auto ipair = s_native_vertex_map.emplace(decl, std::move(fmt));
iter = ipair.first;
}
return iter->second.get();
}
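// Builds a declaration in which every attribute is present: attributes disabled in decl are
// replaced with dummy entries at offset 0, so ubershaders always see a complete set of inputs.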
NativeVertexFormat* GetUberVertexFormat(const PortableVertexDeclaration& decl)
{
// The padding in the structs can cause the memcmp() in the map to create duplicates.
// Avoid this by initializing the padding to zero.
PortableVertexDeclaration new_decl;
static_assert(std::is_trivially_copyable_v<PortableVertexDeclaration>);
std::memset(static_cast<void*>(&new_decl), 0, sizeof(new_decl));
new_decl.stride = decl.stride;
auto MakeDummyAttribute = [](AttributeFormat& attr, ComponentFormat type, int components,
bool integer) {
attr.type = type;
attr.components = components;
attr.offset = 0;
attr.enable = true;
attr.integer = integer;
};
auto CopyAttribute = [](AttributeFormat& attr, const AttributeFormat& src) {
attr.type = src.type;
attr.components = src.components;
attr.offset = src.offset;
attr.enable = src.enable;
attr.integer = src.integer;
};
if (decl.position.enable)
CopyAttribute(new_decl.position, decl.position);
else
MakeDummyAttribute(new_decl.position, ComponentFormat::Float, 1, false);
for (size_t i = 0; i < std::size(new_decl.normals); i++)
{
if (decl.normals[i].enable)
CopyAttribute(new_decl.normals[i], decl.normals[i]);
else
MakeDummyAttribute(new_decl.normals[i], ComponentFormat::Float, 1, false);
}
for (size_t i = 0; i < std::size(new_decl.colors); i++)
{
if (decl.colors[i].enable)
CopyAttribute(new_decl.colors[i], decl.colors[i]);
else
MakeDummyAttribute(new_decl.colors[i], ComponentFormat::UByte, 4, false);
}
for (size_t i = 0; i < std::size(new_decl.texcoords); i++)
{
if (decl.texcoords[i].enable)
CopyAttribute(new_decl.texcoords[i], decl.texcoords[i]);
else
MakeDummyAttribute(new_decl.texcoords[i], ComponentFormat::Float, 1, false);
}
if (decl.posmtx.enable)
CopyAttribute(new_decl.posmtx, decl.posmtx);
else
MakeDummyAttribute(new_decl.posmtx, ComponentFormat::UByte, 1, true);
return GetOrCreateMatchingFormat(new_decl);
}
namespace detail
{
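// Looks up the loader matching the current VCD/VAT state (creating one if needed), stores it in
// the per-group table and clears that group's dirty bit. This is the slow path taken by
// RefreshLoader when the cached pointer cannot be used.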
template <bool IsPreprocess>
VertexLoaderBase* GetOrCreateLoader(int vtx_attr_group)
{
constexpr CPState* state = IsPreprocess ? &g_preprocess_cp_state : &g_main_cp_state;
constexpr BitSet8& attr_dirty = IsPreprocess ? g_preprocess_vat_dirty : g_main_vat_dirty;
constexpr auto& vertex_loaders =
IsPreprocess ? g_preprocess_vertex_loaders : g_main_vertex_loaders;
VertexLoaderBase* loader;
// We are not allowed to create a native vertex format during preprocessing, as that runs on the
// wrong thread.
bool check_for_native_format = !IsPreprocess;
VertexLoaderUID uid(state->vtx_desc, state->vtx_attr[vtx_attr_group]);
std::lock_guard<std::mutex> lk(s_vertex_loader_map_lock);
VertexLoaderMap::iterator iter = s_vertex_loader_map.find(uid);
if (iter != s_vertex_loader_map.end())
{
loader = iter->second.get();
check_for_native_format &= !loader->m_native_vertex_format;
}
else
{
auto [it, added] = s_vertex_loader_map.try_emplace(
uid,
VertexLoaderBase::CreateVertexLoader(state->vtx_desc, state->vtx_attr[vtx_attr_group]));
loader = it->second.get();
INCSTAT(g_stats.num_vertex_loaders);
}
if (check_for_native_format)
{
// search for a cached native vertex format
loader->m_native_vertex_format = GetOrCreateMatchingFormat(loader->m_native_vtx_decl);
}
vertex_loaders[vtx_attr_group] = loader;
attr_dirty[vtx_attr_group] = false;
return loader;
}
} // namespace detail
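// Cross-checks the CP vertex description against the XF input vertex spec and matrix index
// registers. Mismatches are reported (panic alert / analytics quirks) but rendering continues.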
static void CheckCPConfiguration(int vtx_attr_group)
{
// Validate that the XF input configuration matches the CP configuration
u32 num_cp_colors = std::count_if(
g_main_cp_state.vtx_desc.low.Color.begin(), g_main_cp_state.vtx_desc.low.Color.end(),
[](auto format) { return format != VertexComponentFormat::NotPresent; });
u32 num_cp_tex_coords = std::count_if(
g_main_cp_state.vtx_desc.high.TexCoord.begin(), g_main_cp_state.vtx_desc.high.TexCoord.end(),
[](auto format) { return format != VertexComponentFormat::NotPresent; });
u32 num_cp_normals;
if (g_main_cp_state.vtx_desc.low.Normal == VertexComponentFormat::NotPresent)
num_cp_normals = 0;
else if (g_main_cp_state.vtx_attr[vtx_attr_group].g0.NormalElements == NormalComponentCount::NTB)
num_cp_normals = 3;
else
num_cp_normals = 1;
std::optional<u32> num_xf_normals;
switch (xfmem.invtxspec.numnormals)
{
case NormalCount::None:
num_xf_normals = 0;
break;
case NormalCount::Normal:
num_xf_normals = 1;
break;
case NormalCount::NormalTangentBinormal:
case NormalCount::Invalid: // see https://bugs.dolphin-emu.org/issues/13070
num_xf_normals = 3;
break;
}
if (num_cp_colors != xfmem.invtxspec.numcolors || num_cp_normals != num_xf_normals ||
num_cp_tex_coords != xfmem.invtxspec.numtextures) [[unlikely]]
{
PanicAlertFmt("Mismatched configuration between CP and XF stages - {}/{} colors, {}/{} "
"normals, {}/{} texture coordinates. Please report on the issue tracker.\n\n"
"VCD: {:08x} {:08x}\nVAT {}: {:08x} {:08x} {:08x}\nXF vertex spec: {:08x}",
num_cp_colors, xfmem.invtxspec.numcolors, num_cp_normals,
num_xf_normals.has_value() ? fmt::to_string(num_xf_normals.value()) : "invalid",
num_cp_tex_coords, xfmem.invtxspec.numtextures, g_main_cp_state.vtx_desc.low.Hex,
g_main_cp_state.vtx_desc.high.Hex, vtx_attr_group,
g_main_cp_state.vtx_attr[vtx_attr_group].g0.Hex,
g_main_cp_state.vtx_attr[vtx_attr_group].g1.Hex,
g_main_cp_state.vtx_attr[vtx_attr_group].g2.Hex, xfmem.invtxspec.hex);
// Analytics reporting so we can discover which games have this problem, that way when we
// eventually simulate the behavior we have test cases for it.
if (num_cp_colors != xfmem.invtxspec.numcolors) [[unlikely]]
{
DolphinAnalytics::Instance().ReportGameQuirk(
GameQuirk::MISMATCHED_GPU_COLORS_BETWEEN_CP_AND_XF);
}
if (num_cp_normals != num_xf_normals) [[unlikely]]
{
DolphinAnalytics::Instance().ReportGameQuirk(
GameQuirk::MISMATCHED_GPU_NORMALS_BETWEEN_CP_AND_XF);
}
if (num_cp_tex_coords != xfmem.invtxspec.numtextures) [[unlikely]]
{
DolphinAnalytics::Instance().ReportGameQuirk(
GameQuirk::MISMATCHED_GPU_TEX_COORDS_BETWEEN_CP_AND_XF);
}
// Don't bail out, though; we can still render something successfully
// (real hardware seems to hang in this case, though)
}
if (g_main_cp_state.matrix_index_a.Hex != xfmem.MatrixIndexA.Hex ||
g_main_cp_state.matrix_index_b.Hex != xfmem.MatrixIndexB.Hex) [[unlikely]]
{
WARN_LOG_FMT(VIDEO,
"Mismatched matrix index configuration between CP and XF stages - "
"index A: {:08x}/{:08x}, index B {:08x}/{:08x}.",
g_main_cp_state.matrix_index_a.Hex, xfmem.MatrixIndexA.Hex,
g_main_cp_state.matrix_index_b.Hex, xfmem.MatrixIndexB.Hex);
DolphinAnalytics::Instance().ReportGameQuirk(
GameQuirk::MISMATCHED_GPU_MATRIX_INDICES_BETWEEN_CP_AND_XF);
}
}
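// Decodes `count` vertices from `src` using the loader for the given VAT group and, on the CPU
// thread (IsPreprocess == false), hands them to the vertex manager. Returns the number of source
// bytes consumed.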
template <bool IsPreprocess>
int RunVertices(int vtx_attr_group, OpcodeDecoder::Primitive primitive, int count, const u8* src)
{
if (count == 0) [[unlikely]]
return 0;
ASSERT(count > 0);
VertexLoaderBase* loader = RefreshLoader<IsPreprocess>(vtx_attr_group);
int size = count * loader->m_vertex_size;
if constexpr (!IsPreprocess)
{
// Doing early return for the opposite case would be cleaner
// but triggers a false unreachable code warning in MSVC debug builds.
if (g_needs_cp_xf_consistency_check) [[unlikely]]
{
CheckCPConfiguration(vtx_attr_group);
g_needs_cp_xf_consistency_check = false;
}
// If the native vertex format changed, force a flush.
if (loader->m_native_vertex_format != s_current_vtx_fmt ||
loader->m_native_components != g_current_components) [[unlikely]]
{
g_vertex_manager->Flush();
s_current_vtx_fmt = loader->m_native_vertex_format;
g_current_components = loader->m_native_components;
auto& system = Core::System::GetInstance();
auto& vertex_shader_manager = system.GetVertexShaderManager();
vertex_shader_manager.SetVertexFormat(loader->m_native_components,
loader->m_native_vertex_format->GetVertexDeclaration());
}
// CPUCull's performance increase comes from encoding fewer GPU commands, not sending less data
// Therefore it's only useful to check if culling could remove a flush
const bool can_cpu_cull = g_ActiveConfig.bCPUCull &&
primitive < OpcodeDecoder::Primitive::GX_DRAW_LINES &&
!g_vertex_manager->HasSendableVertices();
// if cull mode is CULL_ALL, tell VertexManager to skip triangles and quads.
// They still need to go through vertex loading, because we need to calculate a zfreeze
// reference slope.
const bool cullall = (bpmem.genMode.cullmode == CullMode::All &&
primitive < OpcodeDecoder::Primitive::GX_DRAW_LINES);
const int stride = loader->m_native_vtx_decl.stride;
DataReader dst = g_vertex_manager->PrepareForAdditionalData(primitive, count, stride,
cullall || can_cpu_cull);
count = loader->RunVertices(src, dst.GetPointer(), count);
if (can_cpu_cull && !cullall)
{
if (!g_vertex_manager->AreAllVerticesCulled(loader, primitive, dst.GetPointer(), count))
{
DataReader new_dst = g_vertex_manager->DisableCullAll(stride);
memmove(new_dst.GetPointer(), dst.GetPointer(), count * stride);
}
}
g_vertex_manager->AddIndices(primitive, count);
g_vertex_manager->FlushData(count, loader->m_native_vtx_decl.stride);
ADDSTAT(g_stats.this_frame.num_prims, count);
INCSTAT(g_stats.this_frame.num_primitive_joins);
}
return size;
}
template int RunVertices<false>(int vtx_attr_group, OpcodeDecoder::Primitive primitive, int count,
const u8* src);
template int RunVertices<true>(int vtx_attr_group, OpcodeDecoder::Primitive primitive, int count,
const u8* src);
NativeVertexFormat* GetCurrentVertexFormat()
{
return s_current_vtx_fmt;
}
} // namespace VertexLoaderManager