Mirror of https://github.com/dolphin-emu/dolphin.git (synced 2025-07-23 06:09:50 -06:00)
VideoCommon: Abstract bounding box
This moves much of the duplicated bounding box code into VideoCommon, leaving only the specific buffer implementations in each backend.
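For orientation, below is a minimal sketch of the kind of shared interface the backends are left to implement, inferred from the VKBoundingBox overrides visible in the diff that follows. The class name, the exact BBoxType definition, and anything not shown in the diff are assumptions, not the actual VideoCommon declarations.

// Sketch only: inferred from the Initialize()/Read()/Write() overrides in the
// diff below. "BoundingBoxBase" and the BBoxType alias are assumed names.
#include <cstdint>
#include <vector>

using BBoxType = int32_t;  // the Vulkan buffer in this diff is accessed as s32 values

class BoundingBoxBase
{
public:
  virtual ~BoundingBoxBase() = default;

  // Backend-specific setup (for Vulkan: the GPU buffer and readback staging buffer).
  virtual bool Initialize() = 0;

  // Raw buffer access implemented per backend; caching and dirty tracking are
  // left to the shared VideoCommon code that calls these.
  virtual std::vector<BBoxType> Read(uint32_t index, uint32_t length) = 0;
  virtual void Write(uint32_t index, const std::vector<BBoxType>& values) = 0;
};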
@@ -3,7 +3,6 @@

#include <vector>

#include "Common/Assert.h"
#include "Common/Logging/Log.h"

#include "VideoBackends/Vulkan/CommandBufferManager.h"
@@ -16,11 +15,7 @@

namespace Vulkan
{
BoundingBox::BoundingBox()
{
}

BoundingBox::~BoundingBox()
VKBoundingBox::~VKBoundingBox()
{
  if (m_gpu_buffer != VK_NULL_HANDLE)
  {
@@ -29,14 +24,8 @@ BoundingBox::~BoundingBox()
  }
}

bool BoundingBox::Initialize()
bool VKBoundingBox::Initialize()
{
  if (!g_ActiveConfig.backend_info.bSupportsBBox)
  {
    WARN_LOG_FMT(VIDEO, "Vulkan: Bounding box is unsupported by your device.");
    return true;
  }

  if (!CreateGPUBuffer())
    return false;
@@ -48,103 +37,71 @@ bool BoundingBox::Initialize()
  return true;
}

void BoundingBox::Flush()
std::vector<BBoxType> VKBoundingBox::Read(u32 index, u32 length)
{
  if (m_gpu_buffer == VK_NULL_HANDLE)
    return;
  // Can't be done within a render pass.
  StateTracker::GetInstance()->EndRenderPass();

  // Combine updates together, chances are the game would have written all 4.
  bool updated_buffer = false;
  for (size_t start = 0; start < 4; start++)
  {
    if (!m_values_dirty[start])
      continue;
  // Ensure all writes are completed to the GPU buffer prior to the transfer.
  StagingBuffer::BufferMemoryBarrier(
      g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, 0,
      BUFFER_SIZE, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
  m_readback_buffer->PrepareForGPUWrite(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                        VK_ACCESS_TRANSFER_WRITE_BIT,
                                        VK_PIPELINE_STAGE_TRANSFER_BIT);

    size_t count = 0;
    std::array<s32, 4> write_values;
    for (; (start + count) < 4; count++)
    {
      if (!m_values_dirty[start + count])
        break;
  // Copy from GPU -> readback buffer.
  VkBufferCopy region = {0, 0, BUFFER_SIZE};
  vkCmdCopyBuffer(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
                  m_readback_buffer->GetBuffer(), 1, &region);

      m_readback_buffer->Read((start + count) * sizeof(s32), &write_values[count], sizeof(s32),
                              false);
      m_values_dirty[start + count] = false;
    }
  // Restore GPU buffer access.
  StagingBuffer::BufferMemoryBarrier(
      g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer, VK_ACCESS_TRANSFER_READ_BIT,
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, 0, BUFFER_SIZE,
      VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
  m_readback_buffer->FlushGPUCache(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                   VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);

    // We can't issue vkCmdUpdateBuffer within a render pass.
    // However, the writes must be serialized, so we can't put it in the init buffer.
    if (!updated_buffer)
    {
      StateTracker::GetInstance()->EndRenderPass();
  // Wait until these commands complete.
  Renderer::GetInstance()->ExecuteCommandBuffer(false, true);

      // Ensure GPU buffer is in a state where it can be transferred to.
      StagingBuffer::BufferMemoryBarrier(
          g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
          VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, 0,
          BUFFER_SIZE, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
  // Cache is now valid.
  m_readback_buffer->InvalidateCPUCache();

      updated_buffer = true;
    }
  // Read out the values and return
  std::vector<BBoxType> values(length);
  m_readback_buffer->Read(index * sizeof(BBoxType), values.data(), length * sizeof(BBoxType),
                          false);
  return values;
}

    vkCmdUpdateBuffer(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
                      start * sizeof(s32), count * sizeof(s32),
                      reinterpret_cast<const u32*>(write_values.data()));
  }
void VKBoundingBox::Write(u32 index, const std::vector<BBoxType>& values)
{
  // We can't issue vkCmdUpdateBuffer within a render pass.
  // However, the writes must be serialized, so we can't put it in the init buffer.
  StateTracker::GetInstance()->EndRenderPass();

  // Ensure GPU buffer is in a state where it can be transferred to.
  StagingBuffer::BufferMemoryBarrier(
      g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, 0,
      BUFFER_SIZE, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);

  // Write the values to the GPU buffer
  vkCmdUpdateBuffer(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
                    index * sizeof(BBoxType), values.size() * sizeof(BBoxType),
                    reinterpret_cast<const BBoxType*>(values.data()));

  // Restore fragment shader access to the buffer.
  if (updated_buffer)
  {
    StagingBuffer::BufferMemoryBarrier(
        g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer, VK_ACCESS_TRANSFER_WRITE_BIT,
        VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, 0, BUFFER_SIZE,
        VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
  }

  // We're now up-to-date.
  m_valid = true;
  StagingBuffer::BufferMemoryBarrier(
      g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer, VK_ACCESS_TRANSFER_WRITE_BIT,
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, 0, BUFFER_SIZE,
      VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
}

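The per-value caching this file used to do itself (the removed Flush() above and the removed Invalidate/Get/Set/Readback paths below) is the duplicated logic the commit message refers to, so it presumably now lives once in VideoCommon. As a rough illustration only, here is a hedged sketch of how such a shared layer could drive the backend Read()/Write() shown above, building on the BoundingBoxBase sketch earlier; BBoxCache, NUM_BBOX_VALUES, and the member names are illustrative, not Dolphin's actual code.

// Illustrative only: cached access layered on top of a backend implementation.
#include <array>
#include <cstdint>
#include <memory>
#include <vector>

constexpr uint32_t NUM_BBOX_VALUES = 4;  // four values, matching m_values_dirty[4] usage above

class BBoxCache
{
public:
  explicit BBoxCache(std::unique_ptr<BoundingBoxBase> impl) : m_impl(std::move(impl)) {}

  BBoxType Get(uint32_t index)
  {
    if (!m_valid)
    {
      // One readback refreshes every value at once.
      const std::vector<BBoxType> gpu = m_impl->Read(0, NUM_BBOX_VALUES);
      for (uint32_t i = 0; i < NUM_BBOX_VALUES; i++)
      {
        if (!m_dirty[i])  // keep locally pending writes
          m_values[i] = gpu[i];
      }
      m_valid = true;
    }
    return m_values[index];
  }

  void Set(uint32_t index, BBoxType value)
  {
    if (m_valid && m_values[index] == value)
      return;  // skip redundant GPU updates, as the removed Vulkan Set() did
    m_values[index] = value;
    m_dirty[index] = true;
  }

  void Flush()
  {
    // Batch contiguous dirty values into a single backend Write(), mirroring
    // the run-combining loop in the removed Flush() above.
    for (uint32_t start = 0; start < NUM_BBOX_VALUES;)
    {
      if (!m_dirty[start])
      {
        start++;
        continue;
      }
      uint32_t end = start;
      while (end < NUM_BBOX_VALUES && m_dirty[end])
        m_dirty[end++] = false;
      m_impl->Write(start,
                    std::vector<BBoxType>(m_values.begin() + start, m_values.begin() + end));
      start = end;
    }
  }

private:
  std::unique_ptr<BoundingBoxBase> m_impl;
  std::array<BBoxType, NUM_BBOX_VALUES> m_values{};
  std::array<bool, NUM_BBOX_VALUES> m_dirty{};
  bool m_valid = false;
};

This split keeps the expensive, backend-specific parts (barriers, staging copies, command-buffer submission) in each backend while the caching policy exists in one place.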
void BoundingBox::Invalidate()
{
  if (m_gpu_buffer == VK_NULL_HANDLE)
    return;

  m_valid = false;
}

s32 BoundingBox::Get(size_t index)
{
  ASSERT(index < NUM_VALUES);

  if (!m_valid)
    Readback();

  s32 value;
  m_readback_buffer->Read(index * sizeof(s32), &value, sizeof(value), false);
  return value;
}

void BoundingBox::Set(size_t index, s32 value)
{
  ASSERT(index < NUM_VALUES);

  // If we're currently valid, update the stored value in both our cache and the GPU buffer.
  if (m_valid)
  {
    // Skip when it hasn't changed.
    s32 current_value;
    m_readback_buffer->Read(index * sizeof(s32), &current_value, sizeof(current_value), false);
    if (current_value == value)
      return;
  }

  // Flag as dirty, and update values.
  m_readback_buffer->Write(index * sizeof(s32), &value, sizeof(value), true);
  m_values_dirty[index] = true;
}

bool BoundingBox::CreateGPUBuffer()
bool VKBoundingBox::CreateGPUBuffer()
{
  VkBufferUsageFlags buffer_usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
                                    VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
@@ -204,7 +161,7 @@ bool BoundingBox::CreateGPUBuffer()
  return true;
}

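CreateGPUBuffer() is otherwise unchanged by this commit, and the hunks above cut the buffer_usage initializer off at the hunk boundary. For readers less familiar with the Vulkan side, here is a minimal standalone sketch of creating a buffer with that storage-plus-transfer usage; the VK_BUFFER_USAGE_TRANSFER_DST_BIT flag, the BUFFER_SIZE value, and the omission of memory handling are assumptions of the sketch, not lines from the diff.

// Sketch only: not Dolphin's CreateGPUBuffer(), just the general shape of it.
#include <cstdint>
#include <vulkan/vulkan.h>

constexpr VkDeviceSize BUFFER_SIZE = 4 * sizeof(int32_t);  // assumed: four s32 values

VkBuffer CreateBoundingBoxBuffer(VkDevice device)
{
  VkBufferCreateInfo info = {};
  info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
  info.size = BUFFER_SIZE;
  // Storage buffer for the fragment shader, transfer source for the readback
  // copy, and (assumed) transfer destination for vkCmdUpdateBuffer writes.
  info.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
               VK_BUFFER_USAGE_TRANSFER_DST_BIT;
  info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

  VkBuffer buffer = VK_NULL_HANDLE;
  if (vkCreateBuffer(device, &info, nullptr, &buffer) != VK_SUCCESS)
    return VK_NULL_HANDLE;

  // Device-memory allocation and vkBindBufferMemory are omitted from this sketch.
  return buffer;
}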
bool BoundingBox::CreateReadbackBuffer()
bool VKBoundingBox::CreateReadbackBuffer()
{
  m_readback_buffer = StagingBuffer::Create(STAGING_BUFFER_TYPE_READBACK, BUFFER_SIZE,
                                            VK_BUFFER_USAGE_TRANSFER_DST_BIT);
@@ -215,39 +172,4 @@ bool BoundingBox::CreateReadbackBuffer()
  return true;
}

void BoundingBox::Readback()
{
  // Can't be done within a render pass.
  StateTracker::GetInstance()->EndRenderPass();

  // Ensure all writes are completed to the GPU buffer prior to the transfer.
  StagingBuffer::BufferMemoryBarrier(
      g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, 0,
      BUFFER_SIZE, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
  m_readback_buffer->PrepareForGPUWrite(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                        VK_ACCESS_TRANSFER_WRITE_BIT,
                                        VK_PIPELINE_STAGE_TRANSFER_BIT);

  // Copy from GPU -> readback buffer.
  VkBufferCopy region = {0, 0, BUFFER_SIZE};
  vkCmdCopyBuffer(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
                  m_readback_buffer->GetBuffer(), 1, &region);

  // Restore GPU buffer access.
  StagingBuffer::BufferMemoryBarrier(
      g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer, VK_ACCESS_TRANSFER_READ_BIT,
      VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, 0, BUFFER_SIZE,
      VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
  m_readback_buffer->FlushGPUCache(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                   VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);

  // Wait until these commands complete.
  Renderer::GetInstance()->ExecuteCommandBuffer(false, true);

  // Cache is now valid.
  m_readback_buffer->InvalidateCPUCache();
  m_valid = true;
}

}  // namespace Vulkan