Mirror of https://github.com/dolphin-emu/dolphin.git
Assert: Uppercase assertion macros
Macros should be all upper-case. This is also a wart that has been sticking out for quite a while now (we avoid underscore-prefixed names, since identifiers like _assert_ are reserved).
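In practice the change is a pure rename at every call site; the snippet below only illustrates the old and new spellings side by side (the conditions and the message text are placeholders, not code from this commit):

    // Before: underscore-wrapped, lower-case macro names
    _assert_(condition);
    _dbg_assert_(VIDEO, condition);
    _assert_msg_(VIDEO, condition, "message");

    // After: upper-case macro names without underscore prefixes
    ASSERT(condition);
    DEBUG_ASSERT(VIDEO, condition);
    ASSERT_MSG(VIDEO, condition, "message");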
@@ -115,7 +115,7 @@ void BoundingBox::Invalidate()
 
 s32 BoundingBox::Get(size_t index)
 {
-  _assert_(index < NUM_VALUES);
+  ASSERT(index < NUM_VALUES);
 
   if (!m_valid)
     Readback();
@@ -127,7 +127,7 @@ s32 BoundingBox::Get(size_t index)
 
 void BoundingBox::Set(size_t index, s32 value)
 {
-  _assert_(index < NUM_VALUES);
+  ASSERT(index < NUM_VALUES);
 
   // If we're currently valid, update the stored value in both our cache and the GPU buffer.
   if (m_valid)
@@ -226,7 +226,7 @@ void CommandBufferManager::WaitForFence(VkFence fence)
     if (m_frame_resources[command_buffer_index].fence == fence)
       break;
   }
-  _assert_(command_buffer_index < m_frame_resources.size());
+  ASSERT(command_buffer_index < m_frame_resources.size());
 
   // Has this command buffer already been waited for?
   if (!m_frame_resources[command_buffer_index].needs_fence_wait)
@@ -342,7 +342,7 @@ void CommandBufferManager::SubmitCommandBuffer(size_t index, VkSemaphore wait_se
   if (present_swap_chain != VK_NULL_HANDLE)
   {
     // Should have a signal semaphore.
-    _assert_(signal_semaphore != VK_NULL_HANDLE);
+    ASSERT(signal_semaphore != VK_NULL_HANDLE);
     VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
                                      nullptr,
                                      1,
@@ -489,14 +489,14 @@ void CommandBufferManager::AddFencePointCallback(
     const CommandBufferExecutedCallback& executed_callback)
 {
   // Shouldn't be adding twice.
-  _assert_(m_fence_point_callbacks.find(key) == m_fence_point_callbacks.end());
+  ASSERT(m_fence_point_callbacks.find(key) == m_fence_point_callbacks.end());
   m_fence_point_callbacks.emplace(key, std::make_pair(queued_callback, executed_callback));
 }
 
 void CommandBufferManager::RemoveFencePointCallback(const void* key)
 {
   auto iter = m_fence_point_callbacks.find(key);
-  _assert_(iter != m_fence_point_callbacks.end());
+  ASSERT(iter != m_fence_point_callbacks.end());
   m_fence_point_callbacks.erase(iter);
 }
 
@@ -393,9 +393,9 @@ Texture2D* FramebufferManager::ResolveEFBColorTexture(const VkRect2D& region)
 
   // It's not valid to resolve out-of-bounds coordinates.
   // Ensuring the region is within the image is the caller's responsibility.
-  _assert_(region.offset.x >= 0 && region.offset.y >= 0 &&
-           (static_cast<u32>(region.offset.x) + region.extent.width) <= GetEFBWidth() &&
-           (static_cast<u32>(region.offset.y) + region.extent.height) <= GetEFBHeight());
+  ASSERT(region.offset.x >= 0 && region.offset.y >= 0 &&
+         (static_cast<u32>(region.offset.x) + region.extent.width) <= GetEFBWidth() &&
+         (static_cast<u32>(region.offset.y) + region.extent.height) <= GetEFBHeight());
 
   // Resolving is considered to be a transfer operation.
   m_efb_color_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
@@ -76,7 +76,7 @@ void PerfQuery::EnableQuery(PerfQueryGroup type)
 {
   u32 index = (m_query_read_pos + m_query_count) % PERF_QUERY_BUFFER_SIZE;
   ActiveQuery& entry = m_query_buffer[index];
-  _assert_(!entry.active && !entry.available);
+  ASSERT(!entry.active && !entry.available);
   entry.active = true;
   m_query_count++;
 
@@ -245,12 +245,12 @@ void PerfQuery::OnCommandBufferQueued(VkCommandBuffer command_buffer, VkFence fe
     if (entry.available)
     {
       // These should be grouped together, and at the start.
-      _assert_(copy_count == 0);
+      ASSERT(copy_count == 0);
       continue;
     }
 
     // If this wrapped around, we need to flush the entries before the end of the buffer.
-    _assert_(entry.active);
+    ASSERT(entry.active);
     if (index < copy_start_index)
     {
       QueueCopyQueryResults(command_buffer, fence, copy_start_index, copy_count);
@@ -311,7 +311,7 @@ void PerfQuery::ProcessResults(u32 start_index, u32 query_count)
                           query_count * sizeof(PerfQueryDataType));
 
   // Should be at maximum query_count queries pending.
-  _assert_(query_count <= m_query_count);
+  ASSERT(query_count <= m_query_count);
   DEBUG_LOG(VIDEO, "process queries %u-%u", start_index, start_index + query_count - 1);
 
   // Remove pending queries.
@@ -321,7 +321,7 @@ void PerfQuery::ProcessResults(u32 start_index, u32 query_count)
     ActiveQuery& entry = m_query_buffer[index];
 
     // Should have a fence associated with it (waiting for a result).
-    _assert_(entry.pending_fence != VK_NULL_HANDLE);
+    ASSERT(entry.pending_fence != VK_NULL_HANDLE);
     entry.pending_fence = VK_NULL_HANDLE;
     entry.available = false;
     entry.active = false;
@@ -117,13 +117,13 @@ void VulkanPostProcessing::FillUniformBuffer(u8* buf, const TargetRectangle& src
       break;
 
     case PostProcessingShaderConfiguration::ConfigurationOption::OptionType::OPTION_INTEGER:
-      _assert_(it.second.m_integer_values.size() < 4);
+      ASSERT(it.second.m_integer_values.size() < 4);
       std::copy_n(it.second.m_integer_values.begin(), it.second.m_integer_values.size(),
                   value.as_int);
       break;
 
    case PostProcessingShaderConfiguration::ConfigurationOption::OptionType::OPTION_FLOAT:
-      _assert_(it.second.m_float_values.size() < 4);
+      ASSERT(it.second.m_float_values.size() < 4);
      std::copy_n(it.second.m_float_values.begin(), it.second.m_float_values.size(),
                  value.as_float);
      break;
@@ -1123,7 +1123,7 @@ void Renderer::SetTexture(u32 index, const AbstractTexture* texture)
   // Texture should always be in SHADER_READ_ONLY layout prior to use.
   // This is so we don't need to transition during render passes.
   auto* tex = texture ? static_cast<const VKTexture*>(texture)->GetRawTexIdentifier() : nullptr;
-  _dbg_assert_(VIDEO, !tex || tex->GetLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+  DEBUG_ASSERT(VIDEO, !tex || tex->GetLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
   StateTracker::GetInstance()->SetTexture(index, tex ? tex->GetView() : VK_NULL_HANDLE);
 }
 
@@ -38,8 +38,8 @@ bool StagingBuffer::Map(VkDeviceSize offset, VkDeviceSize size)
   else
     m_map_size = size;
 
-  _assert_(!m_map_pointer);
-  _assert_(m_map_offset + m_map_size <= m_size);
+  ASSERT(!m_map_pointer);
+  ASSERT(m_map_offset + m_map_size <= m_size);
 
   void* map_pointer;
   VkResult res = vkMapMemory(g_vulkan_context->GetDevice(), m_memory, m_map_offset, m_map_size, 0,
@@ -56,7 +56,7 @@ bool StagingBuffer::Map(VkDeviceSize offset, VkDeviceSize size)
 
 void StagingBuffer::Unmap()
 {
-  _assert_(m_map_pointer);
+  ASSERT(m_map_pointer);
 
   vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
   m_map_pointer = nullptr;
@@ -66,7 +66,7 @@ void StagingBuffer::Unmap()
 
 void StagingBuffer::FlushCPUCache(VkDeviceSize offset, VkDeviceSize size)
 {
-  _assert_(offset >= m_map_offset);
+  ASSERT(offset >= m_map_offset);
   if (m_coherent)
     return;
 
@@ -83,7 +83,7 @@ void StagingBuffer::InvalidateGPUCache(VkCommandBuffer command_buffer,
   if (m_coherent)
     return;
 
-  _assert_((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
+  ASSERT((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
   Util::BufferMemoryBarrier(command_buffer, m_buffer, VK_ACCESS_HOST_WRITE_BIT, dest_access_flags,
                             offset, size, VK_PIPELINE_STAGE_HOST_BIT, dest_pipeline_stage);
 }
@@ -96,7 +96,7 @@ void StagingBuffer::PrepareForGPUWrite(VkCommandBuffer command_buffer,
   if (m_coherent)
     return;
 
-  _assert_((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
+  ASSERT((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
   Util::BufferMemoryBarrier(command_buffer, m_buffer, 0, dst_access_flags, offset, size,
                             VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, dst_pipeline_stage);
 }
@@ -108,14 +108,14 @@ void StagingBuffer::FlushGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBi
   if (m_coherent)
     return;
 
-  _assert_((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
+  ASSERT((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
   Util::BufferMemoryBarrier(command_buffer, m_buffer, src_access_flags, VK_ACCESS_HOST_READ_BIT,
                             offset, size, src_pipeline_stage, VK_PIPELINE_STAGE_HOST_BIT);
 }
 
 void StagingBuffer::InvalidateCPUCache(VkDeviceSize offset, VkDeviceSize size)
 {
-  _assert_(offset >= m_map_offset);
+  ASSERT(offset >= m_map_offset);
   if (m_coherent)
     return;
 
@@ -126,8 +126,8 @@ void StagingBuffer::InvalidateCPUCache(VkDeviceSize offset, VkDeviceSize size)
 
 void StagingBuffer::Read(VkDeviceSize offset, void* data, size_t size, bool invalidate_caches)
 {
-  _assert_((offset + size) <= m_size);
-  _assert_(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset)));
+  ASSERT((offset + size) <= m_size);
+  ASSERT(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset)));
   if (invalidate_caches)
     InvalidateCPUCache(offset, size);
 
@@ -137,8 +137,8 @@ void StagingBuffer::Read(VkDeviceSize offset, void* data, size_t size, bool inva
 void StagingBuffer::Write(VkDeviceSize offset, const void* data, size_t size,
                           bool invalidate_caches)
 {
-  _assert_((offset + size) <= m_size);
-  _assert_(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset)));
+  ASSERT((offset + size) <= m_size);
+  ASSERT(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset)));
 
   memcpy(m_map_pointer + (offset - m_map_offset), data, size);
   if (invalidate_caches)
@@ -37,7 +37,7 @@ StateTracker* StateTracker::GetInstance()
 
 bool StateTracker::CreateInstance()
 {
-  _assert_(!s_state_tracker);
+  ASSERT(!s_state_tracker);
   s_state_tracker = std::make_unique<StateTracker>();
   if (!s_state_tracker->Initialize())
   {
@@ -116,7 +116,7 @@ void StateTracker::SetIndexBuffer(VkBuffer buffer, VkDeviceSize offset, VkIndexT
 void StateTracker::SetRenderPass(VkRenderPass load_render_pass, VkRenderPass clear_render_pass)
 {
   // Should not be changed within a render pass.
-  _assert_(!InRenderPass());
+  ASSERT(!InRenderPass());
   m_load_render_pass = load_render_pass;
   m_clear_render_pass = clear_render_pass;
 }
@@ -124,7 +124,7 @@ void StateTracker::SetRenderPass(VkRenderPass load_render_pass, VkRenderPass cle
 void StateTracker::SetFramebuffer(VkFramebuffer framebuffer, const VkRect2D& render_area)
 {
   // Should not be changed within a render pass.
-  _assert_(!InRenderPass());
+  ASSERT(!InRenderPass());
   m_framebuffer = framebuffer;
   m_framebuffer_size = render_area;
 }
@@ -395,7 +395,7 @@ void StateTracker::EndRenderPass()
 void StateTracker::BeginClearRenderPass(const VkRect2D& area, const VkClearValue* clear_values,
                                         u32 num_clear_values)
 {
-  _assert_(!InRenderPass());
+  ASSERT(!InRenderPass());
 
   m_current_render_pass = m_clear_render_pass;
   m_framebuffer_render_area = area;
@@ -209,8 +209,8 @@ bool StreamBuffer::ReserveMemory(size_t num_bytes, size_t alignment, bool allow_
   // Can we find a fence to wait on that will give us enough memory?
   if (allow_reuse && WaitForClearSpace(required_bytes))
   {
-    _assert_(m_current_offset == m_current_gpu_position ||
-             (m_current_offset + required_bytes) < m_current_gpu_position);
+    ASSERT(m_current_offset == m_current_gpu_position ||
+           (m_current_offset + required_bytes) < m_current_gpu_position);
     m_current_offset = Util::AlignBufferOffset(m_current_offset, alignment);
     m_last_allocation_size = num_bytes;
     return true;
@@ -232,8 +232,8 @@ bool StreamBuffer::ReserveMemory(size_t num_bytes, size_t alignment, bool allow_
 
 void StreamBuffer::CommitMemory(size_t final_num_bytes)
 {
-  _assert_((m_current_offset + final_num_bytes) <= m_current_size);
-  _assert_(final_num_bytes <= m_last_allocation_size);
+  ASSERT((m_current_offset + final_num_bytes) <= m_current_size);
+  ASSERT(final_num_bytes <= m_last_allocation_size);
 
   // For non-coherent mappings, flush the memory range
   if (!m_coherent_mapping)
@@ -155,7 +155,7 @@ bool SwapChain::SelectSurfaceFormat()
   std::vector<VkSurfaceFormatKHR> surface_formats(format_count);
   res = vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface,
                                              &format_count, surface_formats.data());
-  _assert_(res == VK_SUCCESS);
+  ASSERT(res == VK_SUCCESS);
 
   // If there is a single undefined surface format, the device doesn't care, so we'll just use RGBA
   if (surface_formats[0].format == VK_FORMAT_UNDEFINED)
@@ -189,7 +189,7 @@ bool SwapChain::SelectPresentMode()
   std::vector<VkPresentModeKHR> present_modes(mode_count);
   res = vkGetPhysicalDeviceSurfacePresentModesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface,
                                                   &mode_count, present_modes.data());
-  _assert_(res == VK_SUCCESS);
+  ASSERT(res == VK_SUCCESS);
 
   // Checks if a particular mode is supported, if it is, returns that mode.
   auto CheckForMode = [&present_modes](VkPresentModeKHR check_mode) {
@@ -341,7 +341,7 @@ bool SwapChain::CreateSwapChain()
 
 bool SwapChain::SetupSwapChainImages()
 {
-  _assert_(m_swap_chain_images.empty());
+  ASSERT(m_swap_chain_images.empty());
 
   uint32_t image_count;
   VkResult res =
@@ -355,7 +355,7 @@ bool SwapChain::SetupSwapChainImages()
   std::vector<VkImage> images(image_count);
   res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count,
                                 images.data());
-  _assert_(res == VK_SUCCESS);
+  ASSERT(res == VK_SUCCESS);
 
   m_swap_chain_images.reserve(image_count);
   for (uint32_t i = 0; i < image_count; i++)
@@ -302,7 +302,7 @@ void Texture2D::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout
 
 void Texture2D::TransitionToLayout(VkCommandBuffer command_buffer, ComputeImageLayout new_layout)
 {
-  _assert_(new_layout != ComputeImageLayout::Undefined);
+  ASSERT(new_layout != ComputeImageLayout::Undefined);
   if (m_compute_layout == new_layout)
     return;
 
@@ -222,7 +222,7 @@ void TextureCache::CopyEFBToCacheEntry(TCacheEntry* entry, bool is_depth_copy,
   framebuffer_mgr->FlushEFBPokes();
 
   // Has to be flagged as a render target.
-  _assert_(texture->GetFramebuffer() != VK_NULL_HANDLE);
+  ASSERT(texture->GetFramebuffer() != VK_NULL_HANDLE);
 
   // Can't be done in a render pass, since we're doing our own render pass!
   VkCommandBuffer command_buffer = g_command_buffer_mgr->GetCurrentCommandBuffer();
@@ -158,8 +158,8 @@ void TextureConverter::ConvertTexture(TextureCacheBase::TCacheEntry* dst_entry,
   VKTexture* source_texture = static_cast<VKTexture*>(src_entry->texture.get());
   VKTexture* destination_texture = static_cast<VKTexture*>(dst_entry->texture.get());
 
-  _assert_(static_cast<size_t>(palette_format) < NUM_PALETTE_CONVERSION_SHADERS);
-  _assert_(destination_texture->GetConfig().rendertarget);
+  ASSERT(static_cast<size_t>(palette_format) < NUM_PALETTE_CONVERSION_SHADERS);
+  ASSERT(destination_texture->GetConfig().rendertarget);
 
   // We want to align to 2 bytes (R16) or the device's texel buffer alignment, whichever is greater.
   size_t palette_size = src_entry->format == TextureFormat::I4 ? 32 : 512;
@@ -397,7 +397,7 @@ void UtilityShaderDraw::CommitPSUniforms(size_t size)
 
 void UtilityShaderDraw::SetPushConstants(const void* data, size_t data_size)
 {
-  _assert_(static_cast<u32>(data_size) < PUSH_CONSTANT_BUFFER_SIZE);
+  ASSERT(static_cast<u32>(data_size) < PUSH_CONSTANT_BUFFER_SIZE);
 
   vkCmdPushConstants(m_command_buffer, m_pipeline_info.pipeline_layout,
                      VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0,
@@ -414,8 +414,8 @@ void UtilityShaderDraw::SetPSSampler(size_t index, VkImageView view, VkSampler s
 void UtilityShaderDraw::SetPSTexelBuffer(VkBufferView view)
 {
   // Should only be used with the texture conversion pipeline layout.
-  _assert_(m_pipeline_info.pipeline_layout ==
-           g_object_cache->GetPipelineLayout(PIPELINE_LAYOUT_TEXTURE_CONVERSION));
+  ASSERT(m_pipeline_info.pipeline_layout ==
+         g_object_cache->GetPipelineLayout(PIPELINE_LAYOUT_TEXTURE_CONVERSION));
 
   m_ps_texel_buffer = view;
 }
@@ -765,7 +765,7 @@ void ComputeShaderDispatcher::CommitUniformBuffer(size_t size)
 
 void ComputeShaderDispatcher::SetPushConstants(const void* data, size_t data_size)
 {
-  _assert_(static_cast<u32>(data_size) < PUSH_CONSTANT_BUFFER_SIZE);
+  ASSERT(static_cast<u32>(data_size) < PUSH_CONSTANT_BUFFER_SIZE);
 
   vkCmdPushConstants(m_command_buffer, m_pipeline_info.pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT,
                      0, static_cast<u32>(data_size), data);
@@ -27,7 +27,7 @@ VKPipeline::~VKPipeline()
 
 std::unique_ptr<VKPipeline> VKPipeline::Create(const AbstractPipelineConfig& config)
 {
-  _dbg_assert_(VIDEO, config.vertex_shader && config.pixel_shader);
+  DEBUG_ASSERT(VIDEO, config.vertex_shader && config.pixel_shader);
 
   // Get render pass for config.
   VkRenderPass render_pass = g_object_cache->GetRenderPass(
@@ -34,7 +34,7 @@ VKShader::~VKShader()
 
 bool VKShader::HasBinary() const
 {
-  _assert_(!m_spv.empty());
+  ASSERT(!m_spv.empty());
   return true;
 }
 
@@ -131,13 +131,13 @@ void VKTexture::CopyRectangleFromTexture(const AbstractTexture* src,
 {
   Texture2D* src_texture = static_cast<const VKTexture*>(src)->GetRawTexIdentifier();
 
-  _assert_msg_(VIDEO, static_cast<u32>(src_rect.GetWidth()) <= src_texture->GetWidth() &&
-                   static_cast<u32>(src_rect.GetHeight()) <= src_texture->GetHeight(),
-               "Source rect is too large for CopyRectangleFromTexture");
+  ASSERT_MSG(VIDEO, static_cast<u32>(src_rect.GetWidth()) <= src_texture->GetWidth() &&
+                 static_cast<u32>(src_rect.GetHeight()) <= src_texture->GetHeight(),
+             "Source rect is too large for CopyRectangleFromTexture");
 
-  _assert_msg_(VIDEO, static_cast<u32>(dst_rect.GetWidth()) <= m_config.width &&
-                   static_cast<u32>(dst_rect.GetHeight()) <= m_config.height,
-               "Dest rect is too large for CopyRectangleFromTexture");
+  ASSERT_MSG(VIDEO, static_cast<u32>(dst_rect.GetWidth()) <= m_config.width &&
+                 static_cast<u32>(dst_rect.GetHeight()) <= m_config.height,
+             "Dest rect is too large for CopyRectangleFromTexture");
 
   VkImageCopy image_copy = {
       {VK_IMAGE_ASPECT_COLOR_BIT, src_level, src_layer, src_texture->GetLayers()},
@@ -176,8 +176,8 @@ void VKTexture::ScaleRectangleFromTexture(const AbstractTexture* source,
   StateTracker::GetInstance()->SetPendingRebind();
 
   // Can't render to a non-rendertarget (no framebuffer).
-  _assert_msg_(VIDEO, m_config.rendertarget,
-               "Destination texture for partial copy is not a rendertarget");
+  ASSERT_MSG(VIDEO, m_config.rendertarget,
+             "Destination texture for partial copy is not a rendertarget");
 
   // Render pass expects dst_texture to be in COLOR_ATTACHMENT_OPTIMAL state.
   // src_texture should already be in SHADER_READ_ONLY state, but transition in case (XFB).
@@ -216,10 +216,10 @@ void VKTexture::ResolveFromTexture(const AbstractTexture* src, const MathUtil::R
                                    u32 layer, u32 level)
 {
   const VKTexture* srcentry = static_cast<const VKTexture*>(src);
-  _dbg_assert_(VIDEO, m_config.samples == 1 && m_config.width == srcentry->m_config.width &&
+  DEBUG_ASSERT(VIDEO, m_config.samples == 1 && m_config.width == srcentry->m_config.width &&
                           m_config.height == srcentry->m_config.height &&
                           srcentry->m_config.samples > 1);
-  _dbg_assert_(VIDEO,
+  DEBUG_ASSERT(VIDEO,
                rect.left + rect.GetWidth() <= static_cast<int>(srcentry->m_config.width) &&
                    rect.top + rect.GetHeight() <= static_cast<int>(srcentry->m_config.height));
 
@@ -407,13 +407,13 @@ void VKStagingTexture::CopyFromTexture(const AbstractTexture* src,
                                        const MathUtil::Rectangle<int>& src_rect, u32 src_layer,
                                        u32 src_level, const MathUtil::Rectangle<int>& dst_rect)
 {
-  _assert_(m_type == StagingTextureType::Readback);
-  _assert_(src_rect.GetWidth() == dst_rect.GetWidth() &&
-           src_rect.GetHeight() == dst_rect.GetHeight());
-  _assert_(src_rect.left >= 0 && static_cast<u32>(src_rect.right) <= src->GetConfig().width &&
-           src_rect.top >= 0 && static_cast<u32>(src_rect.bottom) <= src->GetConfig().height);
-  _assert_(dst_rect.left >= 0 && static_cast<u32>(dst_rect.right) <= m_config.width &&
-           dst_rect.top >= 0 && static_cast<u32>(dst_rect.bottom) <= m_config.height);
+  ASSERT(m_type == StagingTextureType::Readback);
+  ASSERT(src_rect.GetWidth() == dst_rect.GetWidth() &&
+         src_rect.GetHeight() == dst_rect.GetHeight());
+  ASSERT(src_rect.left >= 0 && static_cast<u32>(src_rect.right) <= src->GetConfig().width &&
+         src_rect.top >= 0 && static_cast<u32>(src_rect.bottom) <= src->GetConfig().height);
+  ASSERT(dst_rect.left >= 0 && static_cast<u32>(dst_rect.right) <= m_config.width &&
+         dst_rect.top >= 0 && static_cast<u32>(dst_rect.bottom) <= m_config.height);
 
   Texture2D* src_tex = static_cast<const VKTexture*>(src)->GetRawTexIdentifier();
   CopyFromTexture(src_tex, src_rect, src_layer, src_level, dst_rect);
@@ -458,7 +458,7 @@ void VKStagingTexture::CopyFromTexture(Texture2D* src, const MathUtil::Rectangle
   m_needs_flush = true;
   g_command_buffer_mgr->AddFencePointCallback(this,
                                               [this](VkCommandBuffer buf, VkFence fence) {
-                                                _assert_(m_needs_flush);
+                                                ASSERT(m_needs_flush);
                                                 m_flush_fence = fence;
                                               },
                                               [this](VkFence fence) {
@@ -473,13 +473,13 @@ void VKStagingTexture::CopyToTexture(const MathUtil::Rectangle<int>& src_rect, A
                                      const MathUtil::Rectangle<int>& dst_rect, u32 dst_layer,
                                      u32 dst_level)
 {
-  _assert_(m_type == StagingTextureType::Upload);
-  _assert_(src_rect.GetWidth() == dst_rect.GetWidth() &&
-           src_rect.GetHeight() == dst_rect.GetHeight());
-  _assert_(src_rect.left >= 0 && static_cast<u32>(src_rect.right) <= m_config.width &&
-           src_rect.top >= 0 && static_cast<u32>(src_rect.bottom) <= m_config.height);
-  _assert_(dst_rect.left >= 0 && static_cast<u32>(dst_rect.right) <= dst->GetConfig().width &&
-           dst_rect.top >= 0 && static_cast<u32>(dst_rect.bottom) <= dst->GetConfig().height);
+  ASSERT(m_type == StagingTextureType::Upload);
+  ASSERT(src_rect.GetWidth() == dst_rect.GetWidth() &&
+         src_rect.GetHeight() == dst_rect.GetHeight());
+  ASSERT(src_rect.left >= 0 && static_cast<u32>(src_rect.right) <= m_config.width &&
+         src_rect.top >= 0 && static_cast<u32>(src_rect.bottom) <= m_config.height);
+  ASSERT(dst_rect.left >= 0 && static_cast<u32>(dst_rect.right) <= dst->GetConfig().width &&
+         dst_rect.top >= 0 && static_cast<u32>(dst_rect.bottom) <= dst->GetConfig().height);
 
   if (m_needs_flush)
   {
@@ -518,7 +518,7 @@ void VKStagingTexture::CopyToTexture(const MathUtil::Rectangle<int>& src_rect, A
   m_needs_flush = true;
   g_command_buffer_mgr->AddFencePointCallback(this,
                                               [this](VkCommandBuffer buf, VkFence fence) {
-                                                _assert_(m_needs_flush);
+                                                ASSERT(m_needs_flush);
                                                 m_flush_fence = fence;
                                               },
                                               [this](VkFence fence) {
@@ -42,7 +42,7 @@ static VkFormat VarToVkFormat(VarType t, uint32_t components, bool integer)
        VK_FORMAT_R32G32B32A32_SFLOAT} // VAR_FLOAT
   };
 
-  _assert_(components > 0 && components <= 4);
+  ASSERT(components > 0 && components <= 4);
   return integer ? integer_type_lookup[t][components - 1] : float_type_lookup[t][components - 1];
 }
 
@@ -120,7 +120,7 @@ void VertexFormat::SetupInputState()
 void VertexFormat::AddAttribute(uint32_t location, uint32_t binding, VkFormat format,
                                 uint32_t offset)
 {
-  _assert_(m_num_attributes < MAX_VERTEX_ATTRIBUTES);
+  ASSERT(m_num_attributes < MAX_VERTEX_ATTRIBUTES);
 
   m_attribute_descriptions[m_num_attributes].location = location;
   m_attribute_descriptions[m_num_attributes].binding = binding;
@@ -59,7 +59,7 @@ bool VulkanContext::CheckValidationLayerAvailablility()
 
   std::vector<VkExtensionProperties> extension_list(extension_count);
   res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, extension_list.data());
-  _assert_(res == VK_SUCCESS);
+  ASSERT(res == VK_SUCCESS);
 
   u32 layer_count = 0;
   res = vkEnumerateInstanceLayerProperties(&layer_count, nullptr);
@@ -71,7 +71,7 @@ bool VulkanContext::CheckValidationLayerAvailablility()
 
   std::vector<VkLayerProperties> layer_list(layer_count);
   res = vkEnumerateInstanceLayerProperties(&layer_count, layer_list.data());
-  _assert_(res == VK_SUCCESS);
+  ASSERT(res == VK_SUCCESS);
 
   // Check for both VK_EXT_debug_report and VK_LAYER_LUNARG_standard_validation
   return (std::find_if(extension_list.begin(), extension_list.end(),
@@ -148,7 +148,7 @@ bool VulkanContext::SelectInstanceExtensions(ExtensionList* extension_list, bool
   std::vector<VkExtensionProperties> available_extension_list(extension_count);
   res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count,
                                                available_extension_list.data());
-  _assert_(res == VK_SUCCESS);
+  ASSERT(res == VK_SUCCESS);
 
   for (const auto& extension_properties : available_extension_list)
     INFO_LOG(VIDEO, "Available extension: %s", extension_properties.extensionName);
@@ -391,7 +391,7 @@ bool VulkanContext::SelectDeviceExtensions(ExtensionList* extension_list, bool e
   std::vector<VkExtensionProperties> available_extension_list(extension_count);
   res = vkEnumerateDeviceExtensionProperties(m_physical_device, nullptr, &extension_count,
                                              available_extension_list.data());
-  _assert_(res == VK_SUCCESS);
+  ASSERT(res == VK_SUCCESS);
 
   for (const auto& extension_properties : available_extension_list)
     INFO_LOG(VIDEO, "Available extension: %s", extension_properties.extensionName);
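As an aside, the sketch below shows what upper-case assertion macros of this shape can look like. It is a hypothetical stand-in for illustration only, not Dolphin's actual implementation (the real macros report failures through the project's own logging and alert handling); it only mirrors the ASSERT / ASSERT_MSG / DEBUG_ASSERT split visible in the diff above:

    // Hypothetical stand-ins for illustration; not Dolphin's actual Assert.h.
    #include <cstdio>
    #include <cstdlib>

    // Always-on assertion: report the failing expression and location, then abort.
    #define ASSERT(expr)                                                          \
      do                                                                          \
      {                                                                           \
        if (!(expr))                                                              \
        {                                                                         \
          std::fprintf(stderr, "Assertion failed: %s (%s:%d)\n", #expr, __FILE__, \
                       __LINE__);                                                 \
          std::abort();                                                           \
        }                                                                         \
      } while (0)

    // Assertion with a log category and printf-style message, matching call sites
    // such as ASSERT_MSG(VIDEO, cond, "Source rect is too large ...").
    #define ASSERT_MSG(category, expr, ...)                                       \
      do                                                                          \
      {                                                                           \
        if (!(expr))                                                              \
        {                                                                         \
          std::fprintf(stderr, "[%s] ", #category);                               \
          std::fprintf(stderr, __VA_ARGS__);                                      \
          std::fprintf(stderr, "\n");                                             \
          std::abort();                                                           \
        }                                                                         \
      } while (0)

    // Debug-only assertion: checked in debug builds, compiled out in release builds.
    #ifdef NDEBUG
    #define DEBUG_ASSERT(category, expr) ((void)0)
    #else
    #define DEBUG_ASSERT(category, expr) ASSERT(expr)
    #endif

At the call sites touched by this commit only the macro name changes; the argument lists are left untouched.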