// Copyright 2013 Dolphin Emulator Project
// Licensed under GPLv2
// Refer to the license.txt file included.

#include <algorithm>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "Core/HW/Memmap.h"

#include "VideoCommon/BPMemory.h"
#include "VideoCommon/IndexGenerator.h"
#include "VideoCommon/Statistics.h"
#include "VideoCommon/VertexLoader.h"
#include "VideoCommon/VertexLoaderManager.h"
#include "VideoCommon/VertexManagerBase.h"
#include "VideoCommon/VertexShaderManager.h"
#include "VideoCommon/VideoCommon.h"

static int s_attr_dirty; // bitfield

static NativeVertexFormat* s_current_vtx_fmt;

typedef std::pair<VertexLoader*, NativeVertexFormat*> VertexLoaderCacheItem;
static VertexLoaderCacheItem s_VertexLoaders[8];
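
// Annotation (not part of the original file): s_attr_dirty holds one dirty bit per
// vertex attribute group (VAT index 0-7). LoadCPReg() below sets bits whenever the
// CP vertex description or a group's attribute registers change; RefreshLoader()
// clears a group's bit once the (VertexLoader*, NativeVertexFormat*) pair cached in
// s_VertexLoaders for that group has been re-resolved.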

namespace std
{

template <>
struct hash<VertexLoaderUID>
{
	size_t operator()(const VertexLoaderUID& uid) const
	{
		return uid.GetHash();
	}
};

}

typedef std::unordered_map<VertexLoaderUID, VertexLoaderCacheItem> VertexLoaderMap;
typedef std::map<PortableVertexDeclaration, std::unique_ptr<NativeVertexFormat>> NativeVertexLoaderMap;
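
// Annotation (not part of the original file): std::unordered_map needs a std::hash
// specialization for its key type, which is what the hash<VertexLoaderUID> struct
// above provides by forwarding to VertexLoaderUID::GetHash(). Two caches are kept:
// VertexLoaderMap maps a full vertex configuration to its loader and native format,
// while NativeVertexLoaderMap de-duplicates NativeVertexFormat objects by their
// PortableVertexDeclaration, since different loaders may decode to the same layout.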

namespace VertexLoaderManager
{

static VertexLoaderMap s_VertexLoaderMap;
static NativeVertexLoaderMap s_native_vertex_map;
// TODO - change into array of pointers. Keep a map of all seen so far.

void Init()
{
	MarkAllDirty();
	for (auto& map_entry : s_VertexLoaders)
	{
		map_entry.first = nullptr;
		map_entry.second = nullptr;
	}
	RecomputeCachedArraybases();
}

void Shutdown()
{
	for (auto& map_entry : s_VertexLoaderMap)
	{
		delete map_entry.second.first;
	}
	s_VertexLoaderMap.clear();
	s_native_vertex_map.clear();
}
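
// Usage sketch (illustrative, not part of the original file): the call order
// expected from the video backend, inferred only from the functions in this
// namespace.
//
//   VertexLoaderManager::Init();      // clear the per-group cache, refresh array bases
//   // ... per draw command, driven by the opcode decoder:
//   // VertexLoaderManager::RunVertices(vtx_attr_group, primitive, count, remaining_size, false);
//   VertexLoaderManager::Shutdown();  // delete every cached VertexLoader, clear both maps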

namespace
{
struct entry
{
	std::string text;
	u64 num_verts;
	bool operator < (const entry &other) const
	{
		return num_verts > other.num_verts;
	}
};
}

void AppendListToString(std::string *dest)
{
	std::vector<entry> entries;

	size_t total_size = 0;
	for (const auto& map_entry : s_VertexLoaderMap)
	{
		entry e;
		map_entry.second.first->AppendToString(&e.text);
		e.num_verts = map_entry.second.first->GetNumLoadedVerts();
		entries.push_back(e);
		total_size += e.text.size() + 1;
	}
	std::sort(entries.begin(), entries.end());
	dest->reserve(dest->size() + total_size);
	for (const entry& e : entries)
	{
		dest->append(e.text);
	}
}

void MarkAllDirty()
{
	s_attr_dirty = 0xff;
}

static NativeVertexFormat* GetNativeVertexFormat(const PortableVertexDeclaration& format,
	u32 components)
{
	auto& native = s_native_vertex_map[format];
	if (!native)
	{
		auto raw_pointer = g_vertex_manager->CreateNativeVertexFormat();
		native = std::unique_ptr<NativeVertexFormat>(raw_pointer);
		native->Initialize(format);
		native->m_components = components;
	}
	return native.get();
}
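
// Annotation (not part of the original file): s_native_vertex_map[format] uses
// std::map::operator[], which default-constructs an empty std::unique_ptr on the
// first lookup of a declaration; the !native branch then creates and initializes
// the backend object, so each distinct PortableVertexDeclaration is translated
// into a NativeVertexFormat exactly once and shared by all matching loaders.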

static VertexLoaderCacheItem RefreshLoader(int vtx_attr_group)
{
	if ((s_attr_dirty >> vtx_attr_group) & 1)
	{
		VertexLoaderUID uid(g_VtxDesc, g_VtxAttr[vtx_attr_group]);
		VertexLoaderMap::iterator iter = s_VertexLoaderMap.find(uid);
		if (iter != s_VertexLoaderMap.end())
		{
			s_VertexLoaders[vtx_attr_group] = iter->second;
		}
		else
		{
			VertexLoader* loader = new VertexLoader(g_VtxDesc, g_VtxAttr[vtx_attr_group]);

			NativeVertexFormat* vtx_fmt = GetNativeVertexFormat(
				loader->GetNativeVertexDeclaration(),
				loader->GetNativeComponents());

			s_VertexLoaderMap[uid] = std::make_pair(loader, vtx_fmt);
			s_VertexLoaders[vtx_attr_group] = std::make_pair(loader, vtx_fmt);
			INCSTAT(stats.numVertexLoaders);
		}
	}
	s_attr_dirty &= ~(1 << vtx_attr_group);
	return s_VertexLoaders[vtx_attr_group];
}
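
// Annotation (not part of the original file): RefreshLoader() only does work when
// the group's dirty bit is set. The VertexLoaderUID built from g_VtxDesc and
// g_VtxAttr is the cache key; a hit simply repoints s_VertexLoaders[vtx_attr_group],
// while a miss creates a new VertexLoader and resolves (or reuses) the matching
// NativeVertexFormat. The dirty bit is cleared unconditionally on the way out.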

bool RunVertices(int vtx_attr_group, int primitive, int count, size_t buf_size, bool skip_drawing)
{
	if (!count)
		return true;

	auto loader = RefreshLoader(vtx_attr_group);

	size_t size = count * loader.first->GetVertexSize();
	if (buf_size < size)
		return false;

	if (skip_drawing || (bpmem.genMode.cullmode == GenMode::CULL_ALL && primitive < 5))
	{
		// if cull mode is CULL_ALL, ignore triangles and quads
		DataSkip((u32)size);
		return true;
	}

	// If the native vertex format changed, force a flush.
	if (loader.second != s_current_vtx_fmt)
		VertexManager::Flush();
	s_current_vtx_fmt = loader.second;

	VertexManager::PrepareForAdditionalData(primitive, count,
		loader.first->GetNativeVertexDeclaration().stride);

	loader.first->RunVertices(g_VtxAttr[vtx_attr_group], primitive, count);

	IndexGenerator::AddIndices(primitive, count);

	ADDSTAT(stats.thisFrame.numPrims, count);
	INCSTAT(stats.thisFrame.numPrimitiveJoins);

	return true;
}
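
// Caller sketch (illustrative, not part of the original file): how a draw command
// handler is assumed to drive RunVertices(), based only on the signature above.
// The names vat, num_vertices, and remaining_bytes are placeholders.
//
//   // if (!RunVertices(vat, primitive, num_vertices, remaining_bytes, skip_drawing))
//   //     ; // not enough vertex data buffered yet: caller must wait for more FIFO data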

int GetVertexSize(int vtx_attr_group)
{
	return RefreshLoader(vtx_attr_group).first->GetVertexSize();
}

NativeVertexFormat* GetCurrentVertexFormat()
{
	return s_current_vtx_fmt;
}

} // namespace VertexLoaderManager

void LoadCPReg(u32 sub_cmd, u32 value)
{
	switch (sub_cmd & 0xF0)
	{
	case 0x30:
		VertexShaderManager::SetTexMatrixChangedA(value);
		break;

	case 0x40:
		VertexShaderManager::SetTexMatrixChangedB(value);
		break;

	case 0x50:
		g_VtxDesc.Hex &= ~0x1FFFF; // keep the upper bits
		g_VtxDesc.Hex |= value;
		s_attr_dirty = 0xFF;
		break;

	case 0x60:
		g_VtxDesc.Hex &= 0x1FFFF; // keep the lower 17 bits
		g_VtxDesc.Hex |= (u64)value << 17;
		s_attr_dirty = 0xFF;
		break;

	case 0x70:
		_assert_((sub_cmd & 0x0F) < 8);
		g_VtxAttr[sub_cmd & 7].g0.Hex = value;
		s_attr_dirty |= 1 << (sub_cmd & 7);
		break;

	case 0x80:
		_assert_((sub_cmd & 0x0F) < 8);
		g_VtxAttr[sub_cmd & 7].g1.Hex = value;
		s_attr_dirty |= 1 << (sub_cmd & 7);
		break;

	case 0x90:
		_assert_((sub_cmd & 0x0F) < 8);
		g_VtxAttr[sub_cmd & 7].g2.Hex = value;
		s_attr_dirty |= 1 << (sub_cmd & 7);
		break;

	// Pointers to vertex arrays in GC RAM
	case 0xA0:
		arraybases[sub_cmd & 0xF] = value;
		cached_arraybases[sub_cmd & 0xF] = Memory::GetPointer(value);
		break;

	case 0xB0:
		arraystrides[sub_cmd & 0xF] = value & 0xFF;
		break;
	}
}
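
// Annotation (not part of the original file): the vertex descriptor g_VtxDesc.Hex is
// split across CP registers 0x50 and 0x60. Writing 0x50 replaces the low 17 bits
// (cleared first with &= ~0x1FFFF); writing 0x60 replaces everything above bit 16
// (cleared with &= 0x1FFFF, then OR-ed in as value << 17). Registers 0x70/0x80/0x90
// address the g0/g1/g2 words of one of the eight vertex attribute tables, and
// 0xA0/0xB0 set the base address and stride of the 16 vertex arrays in GC RAM.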

void FillCPMemoryArray(u32 *memory)
{
	memory[0x30] = MatrixIndexA.Hex;
	memory[0x40] = MatrixIndexB.Hex;
	memory[0x50] = (u32)g_VtxDesc.Hex;
	memory[0x60] = (u32)(g_VtxDesc.Hex >> 17);

	for (int i = 0; i < 8; ++i)
	{
		memory[0x70 + i] = g_VtxAttr[i].g0.Hex;
		memory[0x80 + i] = g_VtxAttr[i].g1.Hex;
		memory[0x90 + i] = g_VtxAttr[i].g2.Hex;
	}

	for (int i = 0; i < 16; ++i)
	{
		memory[0xA0 + i] = arraybases[i];
		memory[0xB0 + i] = arraystrides[i];
	}
}
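
// Annotation (not part of the original file): FillCPMemoryArray() is the inverse of
// LoadCPReg(): it writes the live CP state back into an array indexed by register
// address, matching the layout LoadCPReg() consumes, presumably so the state can be
// captured and replayed by a caller such as a save state or FIFO log.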

void RecomputeCachedArraybases()
{
	for (int i = 0; i < 16; i++)
	{
		cached_arraybases[i] = Memory::GetPointer(arraybases[i]);
	}
}
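
// Annotation (not part of the original file): cached_arraybases holds host pointers
// obtained through Memory::GetPointer() for the 16 GC-RAM array base addresses.
// RecomputeCachedArraybases() rebuilds all of them at once (Init() calls it), while
// LoadCPReg() keeps the cache coherent whenever a single 0xA0 register is written.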