2015-05-23 22:55:12 -06:00
|
|
|
// Copyright 2008 Dolphin Emulator Project
|
2015-05-17 17:08:10 -06:00
|
|
|
// Licensed under GPLv2+
|
2013-04-17 21:09:55 -06:00
|
|
|
// Refer to the license.txt file included.
|
2009-03-07 01:35:01 -07:00
|
|
|
|
2021-02-06 22:14:21 -07:00
|
|
|
#include "VideoCommon/XFStructs.h"
|
|
|
|
|
|
|
|
#include "Common/BitUtils.h"
|
2016-01-17 14:54:31 -07:00
|
|
|
#include "Common/CommonTypes.h"
|
|
|
|
#include "Common/Logging/Log.h"
|
2017-03-03 12:43:52 -07:00
|
|
|
#include "Common/Swap.h"
|
|
|
|
|
2021-03-07 16:42:10 -07:00
|
|
|
#include "Core/DolphinAnalytics.h"
|
2014-02-17 03:18:15 -07:00
|
|
|
#include "Core/HW/Memmap.h"
|
2017-03-03 12:43:52 -07:00
|
|
|
|
2014-02-17 03:18:15 -07:00
|
|
|
#include "VideoCommon/CPMemory.h"
|
2014-09-03 14:39:26 -06:00
|
|
|
#include "VideoCommon/DataReader.h"
|
Add the 'desynced GPU thread' mode.
It's a relatively big commit (less big with -w), but it's hard to test
any of this separately...
The basic problem is that in netplay or movies, the state of the CPU
must be deterministic, including when the game receives notification
that the GPU has processed FIFO data. Dual core mode notifies the game
whenever the GPU thread actually gets around to doing the work, so it
isn't deterministic. Single core mode is because it notifies the game
'instantly' (after processing the data synchronously), but it's too slow
for many systems and games.
My old dc-netplay branch worked as follows: everything worked as normal
except the state of the CP registers was a lie, and the CPU thread only
delivered results when idle detection triggered (waiting for the GPU if
they weren't ready at that point). Usually, a game is idle iff all the
work for the frame has been done, except for a small amount of work
depending on the GPU result, so neither the CPU or the GPU waiting on
the other affected performance much. However, it's possible that the
game could be waiting for some earlier interrupt, and any of several
games which, for whatever reason, never went into a detectable idle
(even when I tried to improve the detection) would never receive results
at all. (The current method should have better compatibility, but it
also has slightly higher overhead and breaks some other things, so I
want to reimplement this, hopefully with less impact on the code, in the
future.)
With this commit, the basic idea is that the CPU thread acts as if the
work has been done instantly, like single core mode, but actually hands
it off asynchronously to the GPU thread (after backing up some data that
the game might change in memory before it's actually done). Since the
work isn't done, any feedback from the GPU to the CPU, such as real
XFB/EFB copies (virtual are OK), EFB pokes, performance queries, etc. is
broken; but most games work with these options disabled, and there is no
need to try to detect what the CPU thread is doing.
Technically: when the flag g_use_deterministic_gpu_thread (currently
stuck on) is on, the CPU thread calls RunGpu like in single core mode.
This function synchronously copies the data from the FIFO to the
internal video buffer and updates the CP registers, interrupts, etc.
However, instead of the regular ReadDataFromFifo followed by running the
opcode decoder, it runs ReadDataFromFifoOnCPU ->
OpcodeDecoder_Preprocess, which relatively quickly scans through the
FIFO data, detects SetFinish calls etc., which are immediately fired,
and saves certain associated data from memory (e.g. display lists) in
AuxBuffers (a parallel stream to the main FIFO, which is a bit slow at
the moment), before handing the data off to the GPU thread to actually
render. That makes up the bulk of this commit.
In various circumstances, including the aforementioned EFB pokes and
performance queries as well as swap requests (i.e. the end of a frame -
we don't want the CPU potentially pumping out frames too quickly and the
GPU falling behind*), SyncGPU is called to wait for actual completion.
The overhead mainly comes from OpcodeDecoder_Preprocess (which is,
again, synchronous), as well as the actual copying.
Currently, display lists and such are escrowed from main memory even
though they usually won't change over the course of a frame, and
textures are not even though they might, resulting in a small chance of
graphical glitches. When the texture locking (i.e. fault on write) code
lands, I can make this all correct and maybe a little faster.
* This suggests an alternate determinism method of just delaying results
until a short time before the end of each frame. For all I know this
might mostly work - I haven't tried it - but if any significant work
hinges on the competion of render to texture etc., the frame will be
missed.
2014-08-27 20:56:19 -06:00
|
|
|
#include "VideoCommon/Fifo.h"
|
2014-12-14 13:23:13 -07:00
|
|
|
#include "VideoCommon/GeometryShaderManager.h"
|
2014-02-17 03:18:15 -07:00
|
|
|
#include "VideoCommon/PixelShaderManager.h"
|
|
|
|
#include "VideoCommon/VertexManagerBase.h"
|
|
|
|
#include "VideoCommon/VertexShaderManager.h"
|
|
|
|
#include "VideoCommon/XFMemory.h"
|
2009-03-07 01:35:01 -07:00
|
|
|
|
2014-07-08 06:29:26 -06:00
|
|
|
// Invoked before a range of XF memory is overwritten: flush any buffered
// geometry that may still depend on the old values, then mark the range
// dirty so the vertex shader constants get re-uploaded.
static void XFMemWritten(u32 transferSize, u32 baseAddress)
{
  const u32 end_address = baseAddress + transferSize;

  g_vertex_manager->Flush();
  VertexShaderManager::InvalidateXFRange(baseAddress, end_address);
}
|
|
|
|
|
2014-11-27 15:53:11 -07:00
|
|
|
// Reacts to writes landing in the XF register range (addresses below
// XFMEM_REGISTERS_END). For each register about to change, flushes buffered
// vertices and dirties the relevant shader-constant state BEFORE the caller
// commits the new values into xfmem.
//
// transferSize is in 32-bit words; baseAddress is the first XF register
// address. src is only Peek'd (never consumed) so the caller can still
// perform the actual copy afterwards.
static void XFRegWritten(int transferSize, u32 baseAddress, DataReader src)
{
  u32 address = baseAddress;
  u32 dataIndex = 0;

  while (transferSize > 0 && address < XFMEM_REGISTERS_END)
  {
    u32 newValue = src.Peek<u32>(dataIndex * sizeof(u32));
    // By default, consume one register; grouped cases below bump nextAddress
    // to skip the remainder of their register block in a single iteration.
    u32 nextAddress = address + 1;

    switch (address)
    {
    // Status/diagnostic registers: nothing to invalidate; skip the whole
    // 0x1000-0x1006 group at once.
    case XFMEM_ERROR:
    case XFMEM_DIAG:
    case XFMEM_STATE0:  // internal state 0
    case XFMEM_STATE1:  // internal state 1
    case XFMEM_CLOCK:
    case XFMEM_SETGPMETRIC:
      nextAddress = 0x1007;
      break;

    case XFMEM_CLIPDISABLE:
      // if (data & 1) {} // disable clipping detection
      // if (data & 2) {} // disable trivial rejection
      // if (data & 4) {} // disable cpoly clipping acceleration
      break;

    case XFMEM_VTXSPECS:  //__GXXfVtxSpecs, wrote 0004
      break;

    case XFMEM_SETNUMCHAN:
      // Only flush when the color-channel count actually changes; the
      // lighting config is marked dirty unconditionally.
      if (xfmem.numChan.numColorChans != (newValue & 3))
        g_vertex_manager->Flush();
      VertexShaderManager::SetLightingConfigChanged();
      break;

    case XFMEM_SETCHAN0_AMBCOLOR:  // Channel Ambient Color
    case XFMEM_SETCHAN1_AMBCOLOR:
    {
      u8 chan = address - XFMEM_SETCHAN0_AMBCOLOR;
      if (xfmem.ambColor[chan] != newValue)
      {
        g_vertex_manager->Flush();
        VertexShaderManager::SetMaterialColorChanged(chan);
      }
      break;
    }

    case XFMEM_SETCHAN0_MATCOLOR:  // Channel Material Color
    case XFMEM_SETCHAN1_MATCOLOR:
    {
      u8 chan = address - XFMEM_SETCHAN0_MATCOLOR;
      if (xfmem.matColor[chan] != newValue)
      {
        g_vertex_manager->Flush();
        // Material colors live after the two ambient colors in the
        // constant array, hence the +2 offset.
        VertexShaderManager::SetMaterialColorChanged(chan + 2);
      }
      break;
    }

    case XFMEM_SETCHAN0_COLOR:  // Channel Color
    case XFMEM_SETCHAN1_COLOR:
    case XFMEM_SETCHAN0_ALPHA:  // Channel Alpha
    case XFMEM_SETCHAN1_ALPHA:
      // Compare against the currently-stored raw value, masked to the
      // 15 bits the hardware keeps for these registers.
      if (((u32*)&xfmem)[address] != (newValue & 0x7fff))
        g_vertex_manager->Flush();
      VertexShaderManager::SetLightingConfigChanged();
      break;

    case XFMEM_DUALTEX:
      if (xfmem.dualTexTrans.enabled != bool(newValue & 1))
        g_vertex_manager->Flush();
      // -1 marks all texture matrix info entries dirty.
      VertexShaderManager::SetTexMatrixInfoChanged(-1);
      break;

    case XFMEM_SETMATRIXINDA:
      VertexShaderManager::SetTexMatrixChangedA(newValue);
      break;
    case XFMEM_SETMATRIXINDB:
      VertexShaderManager::SetTexMatrixChangedB(newValue);
      break;

    // Viewport is a 6-word register group; handle all of it in one go.
    case XFMEM_SETVIEWPORT:
    case XFMEM_SETVIEWPORT + 1:
    case XFMEM_SETVIEWPORT + 2:
    case XFMEM_SETVIEWPORT + 3:
    case XFMEM_SETVIEWPORT + 4:
    case XFMEM_SETVIEWPORT + 5:
      g_vertex_manager->Flush();
      VertexShaderManager::SetViewportChanged();
      PixelShaderManager::SetViewportChanged();
      GeometryShaderManager::SetViewportChanged();

      nextAddress = XFMEM_SETVIEWPORT + 6;
      break;

    // Projection matrix is a 7-word register group.
    case XFMEM_SETPROJECTION:
    case XFMEM_SETPROJECTION + 1:
    case XFMEM_SETPROJECTION + 2:
    case XFMEM_SETPROJECTION + 3:
    case XFMEM_SETPROJECTION + 4:
    case XFMEM_SETPROJECTION + 5:
    case XFMEM_SETPROJECTION + 6:
      g_vertex_manager->Flush();
      VertexShaderManager::SetProjectionChanged();
      GeometryShaderManager::SetProjectionChanged();

      nextAddress = XFMEM_SETPROJECTION + 7;
      break;

    case XFMEM_SETNUMTEXGENS:  // GXSetNumTexGens
      if (xfmem.numTexGen.numTexGens != (newValue & 15))
        g_vertex_manager->Flush();
      break;

    // Texture matrix info: 8-word register group.
    case XFMEM_SETTEXMTXINFO:
    case XFMEM_SETTEXMTXINFO + 1:
    case XFMEM_SETTEXMTXINFO + 2:
    case XFMEM_SETTEXMTXINFO + 3:
    case XFMEM_SETTEXMTXINFO + 4:
    case XFMEM_SETTEXMTXINFO + 5:
    case XFMEM_SETTEXMTXINFO + 6:
    case XFMEM_SETTEXMTXINFO + 7:
      g_vertex_manager->Flush();
      VertexShaderManager::SetTexMatrixInfoChanged(address - XFMEM_SETTEXMTXINFO);

      nextAddress = XFMEM_SETTEXMTXINFO + 8;
      break;

    // Post-transform matrix info: 8-word register group.
    case XFMEM_SETPOSTMTXINFO:
    case XFMEM_SETPOSTMTXINFO + 1:
    case XFMEM_SETPOSTMTXINFO + 2:
    case XFMEM_SETPOSTMTXINFO + 3:
    case XFMEM_SETPOSTMTXINFO + 4:
    case XFMEM_SETPOSTMTXINFO + 5:
    case XFMEM_SETPOSTMTXINFO + 6:
    case XFMEM_SETPOSTMTXINFO + 7:
      g_vertex_manager->Flush();
      VertexShaderManager::SetTexMatrixInfoChanged(address - XFMEM_SETPOSTMTXINFO);

      nextAddress = XFMEM_SETPOSTMTXINFO + 8;
      break;

    // --------------
    // Unknown Regs
    // --------------

    // Maybe these are for Normals?
    case 0x1048:  // xfmem.texcoords[0].nrmmtxinfo.hex = data; break; ??
    case 0x1049:
    case 0x104a:
    case 0x104b:
    case 0x104c:
    case 0x104d:
    case 0x104e:
    case 0x104f:
      DolphinAnalytics::Instance().ReportGameQuirk(GameQuirk::USES_UNKNOWN_XF_COMMAND);
      DEBUG_LOG_FMT(VIDEO, "Possible Normal Mtx XF reg?: {:x}={:x}", address, newValue);
      break;

    // NOTE: 0x1013-0x1017 intentionally fall through to default and are
    // reported/logged as unknown registers.
    case 0x1013:
    case 0x1014:
    case 0x1015:
    case 0x1016:
    case 0x1017:

    default:
      DolphinAnalytics::Instance().ReportGameQuirk(GameQuirk::USES_UNKNOWN_XF_COMMAND);
      WARN_LOG_FMT(VIDEO, "Unknown XF Reg: {:x}={:x}", address, newValue);
      break;
    }

    // Advance by however many registers the case above consumed.
    int transferred = nextAddress - address;
    address = nextAddress;

    transferSize -= transferred;
    dataIndex += transferred;
  }
}
|
|
|
|
|
2014-11-27 15:53:11 -07:00
|
|
|
// Executes an XF load command: copies transferSize 32-bit words from src
// into XF memory and/or the XF register block starting at baseAddress.
// A transfer that straddles XFMEM_REGISTERS_START is split, with the
// appropriate manager (XFMemWritten / XFRegWritten) notified before each
// half of the copy is committed.
void LoadXFReg(u32 transferSize, u32 baseAddress, DataReader src)
{
  // do not allow writes past registers
  if (baseAddress + transferSize > XFMEM_REGISTERS_END)
  {
    WARN_LOG_FMT(VIDEO, "XF load exceeds address space: {:x} {} bytes", baseAddress, transferSize);
    DolphinAnalytics::Instance().ReportGameQuirk(GameQuirk::USES_UNKNOWN_XF_COMMAND);

    // Clamp: drop the transfer entirely if it starts out of range,
    // otherwise truncate it at the end of the register space.
    transferSize = (baseAddress >= XFMEM_REGISTERS_END) ? 0 : XFMEM_REGISTERS_END - baseAddress;
  }

  // Portion of the transfer below the register area targets XF memory.
  if (baseAddress < XFMEM_REGISTERS_START && transferSize > 0)
  {
    const u32 transfer_end = baseAddress + transferSize;
    const u32 mem_base = baseAddress;
    u32 mem_size = transferSize;

    if (transfer_end >= XFMEM_REGISTERS_START)
    {
      // Straddles the boundary: split into a memory part and a register part.
      mem_size = XFMEM_REGISTERS_START - baseAddress;

      baseAddress = XFMEM_REGISTERS_START;
      transferSize = transfer_end - XFMEM_REGISTERS_START;
    }
    else
    {
      // Entirely within XF memory; nothing left for the register pass.
      transferSize = 0;
    }

    XFMemWritten(mem_size, mem_base);
    for (u32 i = 0; i < mem_size; i++)
    {
      ((u32*)&xfmem)[mem_base + i] = src.Read<u32>();
    }
  }

  // Remainder (if any) targets the XF register block.
  if (transferSize > 0)
  {
    XFRegWritten(transferSize, baseAddress, src);
    for (u32 i = 0; i < transferSize; i++)
    {
      ((u32*)&xfmem)[baseAddress + i] = src.Read<u32>();
    }
  }
}
|
|
|
|
|
2021-02-20 14:17:42 -07:00
|
|
|
// Decodes an indexed-XF command word into its three fields:
// {array index, destination XF address, number of 32-bit words}.
constexpr std::tuple<u32, u32, u32> ExtractIndexedXF(u32 val)
{
  const u32 array_index = val >> 16;               // bits 31-16
  const u32 xf_address = val & 0xFFF;              // bits 11-0 (check mask)
  const u32 word_count = ((val >> 12) & 0xF) + 1;  // bits 15-12, stored as count-1

  return std::make_tuple(array_index, xf_address, word_count);
}
|
|
|
|
|
2009-03-07 01:35:01 -07:00
|
|
|
// TODO - verify that it is correct. Seems to work, though.
|
2011-05-25 12:14:29 -06:00
|
|
|
void LoadIndexedXF(u32 val, int refarray)
|
2009-03-07 01:35:01 -07:00
|
|
|
{
|
2021-02-20 14:17:42 -07:00
|
|
|
const auto [index, address, size] = ExtractIndexedXF(val);
|
2012-05-19 02:54:40 -06:00
|
|
|
// load stuff from array to address in xf mem
|
2016-06-24 02:43:46 -06:00
|
|
|
|
2014-04-27 12:59:04 -06:00
|
|
|
u32* currData = (u32*)(&xfmem) + address;
|
Add the 'desynced GPU thread' mode.
It's a relatively big commit (less big with -w), but it's hard to test
any of this separately...
The basic problem is that in netplay or movies, the state of the CPU
must be deterministic, including when the game receives notification
that the GPU has processed FIFO data. Dual core mode notifies the game
whenever the GPU thread actually gets around to doing the work, so it
isn't deterministic. Single core mode is because it notifies the game
'instantly' (after processing the data synchronously), but it's too slow
for many systems and games.
My old dc-netplay branch worked as follows: everything worked as normal
except the state of the CP registers was a lie, and the CPU thread only
delivered results when idle detection triggered (waiting for the GPU if
they weren't ready at that point). Usually, a game is idle iff all the
work for the frame has been done, except for a small amount of work
depending on the GPU result, so neither the CPU or the GPU waiting on
the other affected performance much. However, it's possible that the
game could be waiting for some earlier interrupt, and any of several
games which, for whatever reason, never went into a detectable idle
(even when I tried to improve the detection) would never receive results
at all. (The current method should have better compatibility, but it
also has slightly higher overhead and breaks some other things, so I
want to reimplement this, hopefully with less impact on the code, in the
future.)
With this commit, the basic idea is that the CPU thread acts as if the
work has been done instantly, like single core mode, but actually hands
it off asynchronously to the GPU thread (after backing up some data that
the game might change in memory before it's actually done). Since the
work isn't done, any feedback from the GPU to the CPU, such as real
XFB/EFB copies (virtual are OK), EFB pokes, performance queries, etc. is
broken; but most games work with these options disabled, and there is no
need to try to detect what the CPU thread is doing.
Technically: when the flag g_use_deterministic_gpu_thread (currently
stuck on) is on, the CPU thread calls RunGpu like in single core mode.
This function synchronously copies the data from the FIFO to the
internal video buffer and updates the CP registers, interrupts, etc.
However, instead of the regular ReadDataFromFifo followed by running the
opcode decoder, it runs ReadDataFromFifoOnCPU ->
OpcodeDecoder_Preprocess, which relatively quickly scans through the
FIFO data, detects SetFinish calls etc., which are immediately fired,
and saves certain associated data from memory (e.g. display lists) in
AuxBuffers (a parallel stream to the main FIFO, which is a bit slow at
the moment), before handing the data off to the GPU thread to actually
render. That makes up the bulk of this commit.
In various circumstances, including the aforementioned EFB pokes and
performance queries as well as swap requests (i.e. the end of a frame -
we don't want the CPU potentially pumping out frames too quickly and the
GPU falling behind*), SyncGPU is called to wait for actual completion.
The overhead mainly comes from OpcodeDecoder_Preprocess (which is,
again, synchronous), as well as the actual copying.
Currently, display lists and such are escrowed from main memory even
though they usually won't change over the course of a frame, and
textures are not even though they might, resulting in a small chance of
graphical glitches. When the texture locking (i.e. fault on write) code
lands, I can make this all correct and maybe a little faster.
* This suggests an alternate determinism method of just delaying results
until a short time before the end of each frame. For all I know this
might mostly work - I haven't tried it - but if any significant work
hinges on the competion of render to texture etc., the frame will be
missed.
2014-08-27 20:56:19 -06:00
|
|
|
u32* newData;
|
2016-01-23 21:31:13 -07:00
|
|
|
if (Fifo::UseDeterministicGPUThread())
|
Add the 'desynced GPU thread' mode.
It's a relatively big commit (less big with -w), but it's hard to test
any of this separately...
The basic problem is that in netplay or movies, the state of the CPU
must be deterministic, including when the game receives notification
that the GPU has processed FIFO data. Dual core mode notifies the game
whenever the GPU thread actually gets around to doing the work, so it
isn't deterministic. Single core mode is because it notifies the game
'instantly' (after processing the data synchronously), but it's too slow
for many systems and games.
My old dc-netplay branch worked as follows: everything worked as normal
except the state of the CP registers was a lie, and the CPU thread only
delivered results when idle detection triggered (waiting for the GPU if
they weren't ready at that point). Usually, a game is idle iff all the
work for the frame has been done, except for a small amount of work
depending on the GPU result, so neither the CPU or the GPU waiting on
the other affected performance much. However, it's possible that the
game could be waiting for some earlier interrupt, and any of several
games which, for whatever reason, never went into a detectable idle
(even when I tried to improve the detection) would never receive results
at all. (The current method should have better compatibility, but it
also has slightly higher overhead and breaks some other things, so I
want to reimplement this, hopefully with less impact on the code, in the
future.)
With this commit, the basic idea is that the CPU thread acts as if the
work has been done instantly, like single core mode, but actually hands
it off asynchronously to the GPU thread (after backing up some data that
the game might change in memory before it's actually done). Since the
work isn't done, any feedback from the GPU to the CPU, such as real
XFB/EFB copies (virtual are OK), EFB pokes, performance queries, etc. is
broken; but most games work with these options disabled, and there is no
need to try to detect what the CPU thread is doing.
Technically: when the flag g_use_deterministic_gpu_thread (currently
stuck on) is on, the CPU thread calls RunGpu like in single core mode.
This function synchronously copies the data from the FIFO to the
internal video buffer and updates the CP registers, interrupts, etc.
However, instead of the regular ReadDataFromFifo followed by running the
opcode decoder, it runs ReadDataFromFifoOnCPU ->
OpcodeDecoder_Preprocess, which relatively quickly scans through the
FIFO data, detects SetFinish calls etc., which are immediately fired,
and saves certain associated data from memory (e.g. display lists) in
AuxBuffers (a parallel stream to the main FIFO, which is a bit slow at
the moment), before handing the data off to the GPU thread to actually
render. That makes up the bulk of this commit.
In various circumstances, including the aforementioned EFB pokes and
performance queries as well as swap requests (i.e. the end of a frame -
we don't want the CPU potentially pumping out frames too quickly and the
GPU falling behind*), SyncGPU is called to wait for actual completion.
The overhead mainly comes from OpcodeDecoder_Preprocess (which is,
again, synchronous), as well as the actual copying.
Currently, display lists and such are escrowed from main memory even
though they usually won't change over the course of a frame, and
textures are not even though they might, resulting in a small chance of
graphical glitches. When the texture locking (i.e. fault on write) code
lands, I can make this all correct and maybe a little faster.
* This suggests an alternate determinism method of just delaying results
until a short time before the end of each frame. For all I know this
might mostly work - I haven't tried it - but if any significant work
hinges on the competion of render to texture etc., the frame will be
missed.
2014-08-27 20:56:19 -06:00
|
|
|
{
|
2016-01-12 14:44:58 -07:00
|
|
|
newData = (u32*)Fifo::PopFifoAuxBuffer(size * sizeof(u32));
|
Add the 'desynced GPU thread' mode.
It's a relatively big commit (less big with -w), but it's hard to test
any of this separately...
The basic problem is that in netplay or movies, the state of the CPU
must be deterministic, including when the game receives notification
that the GPU has processed FIFO data. Dual core mode notifies the game
whenever the GPU thread actually gets around to doing the work, so it
isn't deterministic. Single core mode is because it notifies the game
'instantly' (after processing the data synchronously), but it's too slow
for many systems and games.
My old dc-netplay branch worked as follows: everything worked as normal
except the state of the CP registers was a lie, and the CPU thread only
delivered results when idle detection triggered (waiting for the GPU if
they weren't ready at that point). Usually, a game is idle iff all the
work for the frame has been done, except for a small amount of work
depending on the GPU result, so neither the CPU or the GPU waiting on
the other affected performance much. However, it's possible that the
game could be waiting for some earlier interrupt, and any of several
games which, for whatever reason, never went into a detectable idle
(even when I tried to improve the detection) would never receive results
at all. (The current method should have better compatibility, but it
also has slightly higher overhead and breaks some other things, so I
want to reimplement this, hopefully with less impact on the code, in the
future.)
With this commit, the basic idea is that the CPU thread acts as if the
work has been done instantly, like single core mode, but actually hands
it off asynchronously to the GPU thread (after backing up some data that
the game might change in memory before it's actually done). Since the
work isn't done, any feedback from the GPU to the CPU, such as real
XFB/EFB copies (virtual are OK), EFB pokes, performance queries, etc. is
broken; but most games work with these options disabled, and there is no
need to try to detect what the CPU thread is doing.
Technically: when the flag g_use_deterministic_gpu_thread (currently
stuck on) is on, the CPU thread calls RunGpu like in single core mode.
This function synchronously copies the data from the FIFO to the
internal video buffer and updates the CP registers, interrupts, etc.
However, instead of the regular ReadDataFromFifo followed by running the
opcode decoder, it runs ReadDataFromFifoOnCPU ->
OpcodeDecoder_Preprocess, which relatively quickly scans through the
FIFO data, detects SetFinish calls etc., which are immediately fired,
and saves certain associated data from memory (e.g. display lists) in
AuxBuffers (a parallel stream to the main FIFO, which is a bit slow at
the moment), before handing the data off to the GPU thread to actually
render. That makes up the bulk of this commit.
In various circumstances, including the aforementioned EFB pokes and
performance queries as well as swap requests (i.e. the end of a frame -
we don't want the CPU potentially pumping out frames too quickly and the
GPU falling behind*), SyncGPU is called to wait for actual completion.
The overhead mainly comes from OpcodeDecoder_Preprocess (which is,
again, synchronous), as well as the actual copying.
Currently, display lists and such are escrowed from main memory even
though they usually won't change over the course of a frame, and
textures are not even though they might, resulting in a small chance of
graphical glitches. When the texture locking (i.e. fault on write) code
lands, I can make this all correct and maybe a little faster.
* This suggests an alternate determinism method of just delaying results
until a short time before the end of each frame. For all I know this
might mostly work - I haven't tried it - but if any significant work
hinges on the competion of render to texture etc., the frame will be
missed.
2014-08-27 20:56:19 -06:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
newData = (u32*)Memory::GetPointer(g_main_cp_state.array_bases[refarray] +
|
|
|
|
g_main_cp_state.array_strides[refarray] * index);
|
|
|
|
}
|
2012-05-20 14:16:21 -06:00
|
|
|
bool changed = false;
|
2021-02-20 14:17:42 -07:00
|
|
|
for (u32 i = 0; i < size; ++i)
|
2012-05-19 02:54:40 -06:00
|
|
|
{
|
2012-05-20 14:16:21 -06:00
|
|
|
if (currData[i] != Common::swap32(newData[i]))
|
|
|
|
{
|
|
|
|
changed = true;
|
|
|
|
XFMemWritten(size, address);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (changed)
|
|
|
|
{
|
2021-02-20 14:17:42 -07:00
|
|
|
for (u32 i = 0; i < size; ++i)
|
2012-05-20 14:16:21 -06:00
|
|
|
currData[i] = Common::swap32(newData[i]);
|
2012-05-19 02:54:40 -06:00
|
|
|
}
|
2009-03-07 01:35:01 -07:00
|
|
|
}
|
Add the 'desynced GPU thread' mode.
It's a relatively big commit (less big with -w), but it's hard to test
any of this separately...
The basic problem is that in netplay or movies, the state of the CPU
must be deterministic, including when the game receives notification
that the GPU has processed FIFO data. Dual core mode notifies the game
whenever the GPU thread actually gets around to doing the work, so it
isn't deterministic. Single core mode is because it notifies the game
'instantly' (after processing the data synchronously), but it's too slow
for many systems and games.
My old dc-netplay branch worked as follows: everything worked as normal
except the state of the CP registers was a lie, and the CPU thread only
delivered results when idle detection triggered (waiting for the GPU if
they weren't ready at that point). Usually, a game is idle iff all the
work for the frame has been done, except for a small amount of work
depending on the GPU result, so neither the CPU or the GPU waiting on
the other affected performance much. However, it's possible that the
game could be waiting for some earlier interrupt, and any of several
games which, for whatever reason, never went into a detectable idle
(even when I tried to improve the detection) would never receive results
at all. (The current method should have better compatibility, but it
also has slightly higher overhead and breaks some other things, so I
want to reimplement this, hopefully with less impact on the code, in the
future.)
With this commit, the basic idea is that the CPU thread acts as if the
work has been done instantly, like single core mode, but actually hands
it off asynchronously to the GPU thread (after backing up some data that
the game might change in memory before it's actually done). Since the
work isn't done, any feedback from the GPU to the CPU, such as real
XFB/EFB copies (virtual are OK), EFB pokes, performance queries, etc. is
broken; but most games work with these options disabled, and there is no
need to try to detect what the CPU thread is doing.
Technically: when the flag g_use_deterministic_gpu_thread (currently
stuck on) is on, the CPU thread calls RunGpu like in single core mode.
This function synchronously copies the data from the FIFO to the
internal video buffer and updates the CP registers, interrupts, etc.
However, instead of the regular ReadDataFromFifo followed by running the
opcode decoder, it runs ReadDataFromFifoOnCPU ->
OpcodeDecoder_Preprocess, which relatively quickly scans through the
FIFO data, detects SetFinish calls etc., which are immediately fired,
and saves certain associated data from memory (e.g. display lists) in
AuxBuffers (a parallel stream to the main FIFO, which is a bit slow at
the moment), before handing the data off to the GPU thread to actually
render. That makes up the bulk of this commit.
In various circumstances, including the aforementioned EFB pokes and
performance queries as well as swap requests (i.e. the end of a frame -
we don't want the CPU potentially pumping out frames too quickly and the
GPU falling behind*), SyncGPU is called to wait for actual completion.
The overhead mainly comes from OpcodeDecoder_Preprocess (which is,
again, synchronous), as well as the actual copying.
Currently, display lists and such are escrowed from main memory even
though they usually won't change over the course of a frame, and
textures are not even though they might, resulting in a small chance of
graphical glitches. When the texture locking (i.e. fault on write) code
lands, I can make this all correct and maybe a little faster.
* This suggests an alternate determinism method of just delaying results
until a short time before the end of each frame. For all I know this
might mostly work - I haven't tried it - but if any significant work
hinges on the competion of render to texture etc., the frame will be
missed.
2014-08-27 20:56:19 -06:00
|
|
|
|
|
|
|
// Preprocess-phase handler for an indexed XF load (deterministic GPU thread mode).
// Copies the referenced CP array row out of main memory into the FIFO aux buffer so the
// GPU thread later sees a stable snapshot even if the game overwrites the memory first.
// `val` is the raw indexed-load command word; `refarray` selects which CP array it references.
void PreprocessIndexedXF(u32 val, int refarray)
{
  // `address` (the XF destination) is not needed while preprocessing; only the
  // source index and word count matter for taking the snapshot.
  const auto [index, address, size] = ExtractIndexedXF(val);

  // Resolve the source row using the *preprocess* CP state — the CPU-thread copy of the
  // CP registers, which may differ from the GPU thread's view at this point in the stream.
  const u8* new_data = Memory::GetPointer(g_preprocess_cp_state.array_bases[refarray] +
                                          g_preprocess_cp_state.array_strides[refarray] * index);

  // Each transferred XF element is a 32-bit word.
  const size_t buf_size = size * sizeof(u32);
  // Queue the snapshot alongside the main FIFO stream for the GPU thread to consume.
  Fifo::PushFifoAuxBuffer(new_data, buf_size);
}
|
2021-02-06 22:14:21 -07:00
|
|
|
|
|
|
|
std::pair<std::string, std::string> GetXFRegInfo(u32 address, u32 value)
|
|
|
|
{
|
|
|
|
// Macro to set the register name and make sure it was written correctly via compile time assertion
|
|
|
|
#define RegName(reg) ((void)(reg), #reg)
|
|
|
|
#define DescriptionlessReg(reg) std::make_pair(RegName(reg), "");
|
|
|
|
|
|
|
|
switch (address)
|
|
|
|
{
|
|
|
|
case XFMEM_ERROR:
|
|
|
|
return DescriptionlessReg(XFMEM_ERROR);
|
|
|
|
case XFMEM_DIAG:
|
|
|
|
return DescriptionlessReg(XFMEM_DIAG);
|
|
|
|
case XFMEM_STATE0: // internal state 0
|
|
|
|
return std::make_pair(RegName(XFMEM_STATE0), "internal state 0");
|
|
|
|
case XFMEM_STATE1: // internal state 1
|
|
|
|
return std::make_pair(RegName(XFMEM_STATE1), "internal state 1");
|
|
|
|
case XFMEM_CLOCK:
|
|
|
|
return DescriptionlessReg(XFMEM_CLOCK);
|
|
|
|
case XFMEM_SETGPMETRIC:
|
|
|
|
return DescriptionlessReg(XFMEM_SETGPMETRIC);
|
|
|
|
|
|
|
|
case XFMEM_CLIPDISABLE:
|
|
|
|
return std::make_pair(RegName(XFMEM_CLIPDISABLE), fmt::to_string(ClipDisable{.hex = value}));
|
|
|
|
|
|
|
|
case XFMEM_VTXSPECS:
|
|
|
|
return std::make_pair(RegName(XFMEM_VTXSPECS), fmt::to_string(INVTXSPEC{.hex = value}));
|
|
|
|
|
|
|
|
case XFMEM_SETNUMCHAN:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETNUMCHAN),
|
|
|
|
fmt::format("Number of color channels: {}", value & 3));
|
|
|
|
break;
|
|
|
|
|
|
|
|
case XFMEM_SETCHAN0_AMBCOLOR:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETCHAN0_AMBCOLOR),
|
|
|
|
fmt::format("Channel 0 Ambient Color: {:06x}", value));
|
|
|
|
case XFMEM_SETCHAN1_AMBCOLOR:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETCHAN1_AMBCOLOR),
|
|
|
|
fmt::format("Channel 1 Ambient Color: {:06x}", value));
|
|
|
|
|
|
|
|
case XFMEM_SETCHAN0_MATCOLOR:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETCHAN0_MATCOLOR),
|
|
|
|
fmt::format("Channel 0 Material Color: {:06x}", value));
|
|
|
|
case XFMEM_SETCHAN1_MATCOLOR:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETCHAN1_MATCOLOR),
|
|
|
|
fmt::format("Channel 1 Material Color: {:06x}", value));
|
|
|
|
|
|
|
|
case XFMEM_SETCHAN0_COLOR: // Channel Color
|
|
|
|
return std::make_pair(RegName(XFMEM_SETCHAN0_COLOR),
|
|
|
|
fmt::format("Channel 0 Color config:\n{}", LitChannel{.hex = value}));
|
|
|
|
case XFMEM_SETCHAN1_COLOR:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETCHAN1_COLOR),
|
|
|
|
fmt::format("Channel 1 Color config:\n{}", LitChannel{.hex = value}));
|
|
|
|
case XFMEM_SETCHAN0_ALPHA: // Channel Alpha
|
|
|
|
return std::make_pair(RegName(XFMEM_SETCHAN0_ALPHA),
|
|
|
|
fmt::format("Channel 0 Alpha config:\n{}", LitChannel{.hex = value}));
|
|
|
|
case XFMEM_SETCHAN1_ALPHA:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETCHAN1_ALPHA),
|
|
|
|
fmt::format("Channel 1 Alpha config:\n{}", LitChannel{.hex = value}));
|
|
|
|
|
|
|
|
case XFMEM_DUALTEX:
|
|
|
|
return std::make_pair(RegName(XFMEM_DUALTEX),
|
|
|
|
fmt::format("Dual Tex Trans {}", (value & 1) ? "enabled" : "disabled"));
|
|
|
|
|
|
|
|
case XFMEM_SETMATRIXINDA:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETMATRIXINDA),
|
|
|
|
fmt::format("Matrix index A:\n{}", TMatrixIndexA{.Hex = value}));
|
|
|
|
case XFMEM_SETMATRIXINDB:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETMATRIXINDB),
|
|
|
|
fmt::format("Matrix index B:\n{}", TMatrixIndexB{.Hex = value}));
|
|
|
|
|
|
|
|
case XFMEM_SETVIEWPORT:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETVIEWPORT + 0),
|
|
|
|
fmt::format("Viewport width: {}", Common::BitCast<float>(value)));
|
|
|
|
case XFMEM_SETVIEWPORT + 1:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETVIEWPORT + 1),
|
|
|
|
fmt::format("Viewport height: {}", Common::BitCast<float>(value)));
|
|
|
|
case XFMEM_SETVIEWPORT + 2:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETVIEWPORT + 2),
|
|
|
|
fmt::format("Viewport z range: {}", Common::BitCast<float>(value)));
|
|
|
|
case XFMEM_SETVIEWPORT + 3:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETVIEWPORT + 3),
|
|
|
|
fmt::format("Viewport x origin: {}", Common::BitCast<float>(value)));
|
|
|
|
case XFMEM_SETVIEWPORT + 4:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETVIEWPORT + 4),
|
|
|
|
fmt::format("Viewport y origin: {}", Common::BitCast<float>(value)));
|
|
|
|
case XFMEM_SETVIEWPORT + 5:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETVIEWPORT + 5),
|
|
|
|
fmt::format("Viewport far z: {}", Common::BitCast<float>(value)));
|
|
|
|
break;
|
|
|
|
|
|
|
|
case XFMEM_SETPROJECTION:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETPROJECTION + 0),
|
|
|
|
fmt::format("Projection[0]: {}", Common::BitCast<float>(value)));
|
|
|
|
case XFMEM_SETPROJECTION + 1:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETPROJECTION + 1),
|
|
|
|
fmt::format("Projection[1]: {}", Common::BitCast<float>(value)));
|
|
|
|
case XFMEM_SETPROJECTION + 2:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETPROJECTION + 2),
|
|
|
|
fmt::format("Projection[2]: {}", Common::BitCast<float>(value)));
|
|
|
|
case XFMEM_SETPROJECTION + 3:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETPROJECTION + 3),
|
|
|
|
fmt::format("Projection[3]: {}", Common::BitCast<float>(value)));
|
|
|
|
case XFMEM_SETPROJECTION + 4:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETPROJECTION + 4),
|
|
|
|
fmt::format("Projection[4]: {}", Common::BitCast<float>(value)));
|
|
|
|
case XFMEM_SETPROJECTION + 5:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETPROJECTION + 5),
|
|
|
|
fmt::format("Projection[5]: {}", Common::BitCast<float>(value)));
|
|
|
|
case XFMEM_SETPROJECTION + 6:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETPROJECTION + 6),
|
|
|
|
fmt::to_string(static_cast<ProjectionType>(value)));
|
|
|
|
|
|
|
|
case XFMEM_SETNUMTEXGENS:
|
|
|
|
return std::make_pair(RegName(XFMEM_SETNUMTEXGENS),
|
|
|
|
fmt::format("Number of tex gens: {}", value & 15));
|
|
|
|
|
|
|
|
case XFMEM_SETTEXMTXINFO:
|
|
|
|
case XFMEM_SETTEXMTXINFO + 1:
|
|
|
|
case XFMEM_SETTEXMTXINFO + 2:
|
|
|
|
case XFMEM_SETTEXMTXINFO + 3:
|
|
|
|
case XFMEM_SETTEXMTXINFO + 4:
|
|
|
|
case XFMEM_SETTEXMTXINFO + 5:
|
|
|
|
case XFMEM_SETTEXMTXINFO + 6:
|
|
|
|
case XFMEM_SETTEXMTXINFO + 7:
|
|
|
|
return std::make_pair(
|
|
|
|
fmt::format("XFMEM_SETTEXMTXINFO Matrix {}", address - XFMEM_SETTEXMTXINFO),
|
|
|
|
fmt::to_string(TexMtxInfo{.hex = value}));
|
|
|
|
|
|
|
|
case XFMEM_SETPOSTMTXINFO:
|
|
|
|
case XFMEM_SETPOSTMTXINFO + 1:
|
|
|
|
case XFMEM_SETPOSTMTXINFO + 2:
|
|
|
|
case XFMEM_SETPOSTMTXINFO + 3:
|
|
|
|
case XFMEM_SETPOSTMTXINFO + 4:
|
|
|
|
case XFMEM_SETPOSTMTXINFO + 5:
|
|
|
|
case XFMEM_SETPOSTMTXINFO + 6:
|
|
|
|
case XFMEM_SETPOSTMTXINFO + 7:
|
|
|
|
return std::make_pair(
|
|
|
|
fmt::format("XFMEM_SETPOSTMTXINFO Matrix {}", address - XFMEM_SETPOSTMTXINFO),
|
|
|
|
fmt::to_string(PostMtxInfo{.hex = value}));
|
|
|
|
|
|
|
|
// --------------
|
|
|
|
// Unknown Regs
|
|
|
|
// --------------
|
|
|
|
|
|
|
|
// Maybe these are for Normals?
|
|
|
|
case 0x1048: // xfmem.texcoords[0].nrmmtxinfo.hex = data; break; ??
|
|
|
|
case 0x1049:
|
|
|
|
case 0x104a:
|
|
|
|
case 0x104b:
|
|
|
|
case 0x104c:
|
|
|
|
case 0x104d:
|
|
|
|
case 0x104e:
|
|
|
|
case 0x104f:
|
|
|
|
return std::make_pair(
|
|
|
|
fmt::format("Possible Normal Mtx XF reg?: {:x}={:x}", address, value),
|
|
|
|
"Maybe these are for Normals? xfmem.texcoords[0].nrmmtxinfo.hex = data; break; ??");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 0x1013:
|
|
|
|
case 0x1014:
|
|
|
|
case 0x1015:
|
|
|
|
case 0x1016:
|
|
|
|
case 0x1017:
|
|
|
|
|
|
|
|
default:
|
|
|
|
return std::make_pair(fmt::format("Unknown XF Reg: {:x}={:x}", address, value), "");
|
|
|
|
}
|
|
|
|
#undef RegName
|
|
|
|
#undef DescriptionlessReg
|
|
|
|
}
|
|
|
|
|
2021-02-07 00:30:01 -07:00
|
|
|
std::string GetXFMemName(u32 address)
|
|
|
|
{
|
|
|
|
if (address >= XFMEM_POSMATRICES && address < XFMEM_POSMATRICES_END)
|
|
|
|
{
|
|
|
|
const u32 row = (address - XFMEM_POSMATRICES) / 4;
|
|
|
|
const u32 col = (address - XFMEM_POSMATRICES) % 4;
|
|
|
|
return fmt::format("Position matrix row {:2d} col {:2d}", row, col);
|
|
|
|
}
|
|
|
|
else if (address >= XFMEM_NORMALMATRICES && address < XFMEM_NORMALMATRICES_END)
|
|
|
|
{
|
|
|
|
const u32 row = (address - XFMEM_NORMALMATRICES) / 3;
|
|
|
|
const u32 col = (address - XFMEM_NORMALMATRICES) % 3;
|
|
|
|
return fmt::format("Normal matrix row {:2d} col {:2d}", row, col);
|
|
|
|
}
|
|
|
|
else if (address >= XFMEM_POSTMATRICES && address < XFMEM_POSTMATRICES_END)
|
|
|
|
{
|
|
|
|
const u32 row = (address - XFMEM_POSMATRICES) / 4;
|
|
|
|
const u32 col = (address - XFMEM_POSMATRICES) % 4;
|
|
|
|
return fmt::format("Post matrix row {:2d} col {:2d}", row, col);
|
|
|
|
}
|
|
|
|
else if (address >= XFMEM_LIGHTS && address < XFMEM_LIGHTS_END)
|
|
|
|
{
|
|
|
|
const u32 light = (address - XFMEM_LIGHTS) / 16;
|
|
|
|
const u32 offset = (address - XFMEM_LIGHTS) % 16;
|
|
|
|
switch (offset)
|
|
|
|
{
|
|
|
|
default:
|
|
|
|
return fmt::format("Light {} unused param {}", light, offset);
|
|
|
|
case 3:
|
|
|
|
return fmt::format("Light {} color", light);
|
|
|
|
case 4:
|
|
|
|
case 5:
|
|
|
|
case 6:
|
|
|
|
return fmt::format("Light {} cosine attenuation {}", light, offset - 4);
|
|
|
|
case 7:
|
|
|
|
case 8:
|
|
|
|
case 9:
|
|
|
|
return fmt::format("Light {} distance attenuation {}", light, offset - 7);
|
|
|
|
case 10:
|
|
|
|
case 11:
|
|
|
|
case 12:
|
|
|
|
// Yagcd says light pos or "inf ldir", while dolphin has a union for dpos and sdir with only
|
|
|
|
// dpos being used nowadays. As far as I can tell only the DX9 engine once at
|
|
|
|
// Source/Plugins/Plugin_VideoDX9/Src/TransformEngine.cpp used sdir directly...
|
|
|
|
return fmt::format("Light {0} {1} position or inf ldir {1}", light, "xyz"[offset - 10]);
|
|
|
|
case 13:
|
|
|
|
case 14:
|
|
|
|
case 15:
|
|
|
|
// Yagcd says light dir or "1/2 angle", dolphin has union for ddir or shalfangle.
|
|
|
|
// It would make sense if d stood for direction and s for specular, but it's ddir and
|
|
|
|
// shalfhangle that have the comment "specular lights only", both at the same offset,
|
|
|
|
// while dpos and sdir have none...
|
|
|
|
return fmt::format("Light {0} {1} direction or half hangle {1}", light, "xyz"[offset - 13]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
return fmt::format("Unknown memory {:04x}", address);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string GetXFMemDescription(u32 address, u32 value)
|
|
|
|
{
|
|
|
|
if ((address >= XFMEM_POSMATRICES && address < XFMEM_POSMATRICES_END) ||
|
|
|
|
(address >= XFMEM_NORMALMATRICES && address < XFMEM_NORMALMATRICES_END) ||
|
|
|
|
(address >= XFMEM_POSTMATRICES && address < XFMEM_POSTMATRICES_END))
|
|
|
|
{
|
|
|
|
// The matrices all use floats
|
|
|
|
return fmt::format("{} = {}", GetXFMemName(address), Common::BitCast<float>(value));
|
|
|
|
}
|
|
|
|
else if (address >= XFMEM_LIGHTS && address < XFMEM_LIGHTS_END)
|
|
|
|
{
|
|
|
|
// Each light is 16 words; for this function we don't care which light it is
|
|
|
|
const u32 offset = (address - XFMEM_LIGHTS) % 16;
|
|
|
|
if (offset <= 3)
|
|
|
|
{
|
|
|
|
// The unused parameters (0, 1, 2) and the color (3) should be hex-formatted
|
|
|
|
return fmt::format("{} = {:08x}", GetXFMemName(address), value);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// Everything else is a float
|
|
|
|
return fmt::format("{} = {}", GetXFMemName(address), Common::BitCast<float>(value));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// Unknown address
|
|
|
|
return fmt::format("{} = {:08x}", GetXFMemName(address), value);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-02-06 22:14:21 -07:00
|
|
|
// Decodes an XF transfer command for debug display. `data` points at the command word
// followed by the payload words (big-endian). Returns a (summary, per-word description)
// pair. A transfer can span XF memory, XF registers, or straddle both; each part is
// described separately.
std::pair<std::string, std::string> GetXFTransferInfo(const u8* data)
{
  const u32 cmd = Common::swap32(data);
  data += 4;
  // Low 16 bits: starting XF address. Bits 16-19: word count minus one.
  u32 base_address = cmd & 0xFFFF;
  const u32 transfer_size = ((cmd >> 16) & 15) + 1;

  if (base_address > XFMEM_REGISTERS_END)
  {
    return std::make_pair("Invalid XF Transfer", "Base address past end of address space");
  }
  else if (transfer_size == 1 && base_address >= XFMEM_REGISTERS_START)
  {
    // Write directly to a single register
    const u32 value = Common::swap32(data);
    return GetXFRegInfo(base_address, value);
  }

  // More complicated cases
  fmt::memory_buffer name, desc;
  u32 end_address = base_address + transfer_size;  // exclusive

  // do not allow writes past registers
  if (end_address > XFMEM_REGISTERS_END)
  {
    fmt::format_to(name, "Invalid XF Transfer ");
    fmt::format_to(desc, "Transfer ends past end of address space\n\n");
    // Clamp and describe only the in-range portion.
    end_address = XFMEM_REGISTERS_END;
  }

  // write to XF mem
  if (base_address < XFMEM_REGISTERS_START)
  {
    const u32 xf_mem_base = base_address;
    u32 xf_mem_transfer_size = transfer_size;

    // If the transfer continues into the register range, describe only the memory
    // portion here and advance base_address so the register branch below handles the rest.
    if (end_address > XFMEM_REGISTERS_START)
    {
      xf_mem_transfer_size = XFMEM_REGISTERS_START - base_address;
      base_address = XFMEM_REGISTERS_START;
    }

    fmt::format_to(name, "Write {} XF mem words at {:04x}", xf_mem_transfer_size, xf_mem_base);

    // One description line per payload word; `data` advances in lockstep.
    for (u32 i = 0; i < xf_mem_transfer_size; i++)
    {
      const auto mem_desc = GetXFMemDescription(xf_mem_base + i, Common::swap32(data));
      fmt::format_to(desc, i == 0 ? "{}" : "\n{}", mem_desc);
      data += 4;
    }

    // Separator in the summary when a register write follows the memory write.
    if (end_address > XFMEM_REGISTERS_START)
      fmt::format_to(name, "; ");
  }

  // write to XF regs
  if (base_address >= XFMEM_REGISTERS_START)
  {
    fmt::format_to(name, "Write {} XF regs at {:04x}", end_address - base_address, base_address);

    for (u32 address = base_address; address < end_address; address++)
    {
      const u32 value = Common::swap32(data);

      const auto [regname, regdesc] = GetXFRegInfo(address, value);
      fmt::format_to(desc, "{}\n{}\n", regname, regdesc);

      data += 4;
    }
  }

  return std::make_pair(fmt::to_string(name), fmt::to_string(desc));
}
|
2021-02-20 14:17:42 -07:00
|
|
|
|
|
|
|
std::pair<std::string, std::string> GetXFIndexedLoadInfo(u8 array, u32 value)
|
|
|
|
{
|
|
|
|
const auto [index, address, size] = ExtractIndexedXF(value);
|
|
|
|
|
|
|
|
const auto desc = fmt::format("Load {} bytes to XF address {:03x} from CP array {} row {}", size,
|
|
|
|
address, array, index);
|
|
|
|
fmt::memory_buffer written;
|
|
|
|
for (u32 i = 0; i < size; i++)
|
|
|
|
{
|
|
|
|
fmt::format_to(written, "{}\n", GetXFMemName(address + i));
|
|
|
|
}
|
|
|
|
|
|
|
|
return std::make_pair(desc, fmt::to_string(written));
|
|
|
|
}
|