@@ -226,7 +226,7 @@ void CommandBufferManager::WaitForFence(VkFence fence)
if (m_frame_resources[command_buffer_index].fence == fence)
break;
}
_assert_(command_buffer_index < m_frame_resources.size());
ASSERT(command_buffer_index < m_frame_resources.size());

// Has this command buffer already been waited for?
if (!m_frame_resources[command_buffer_index].needs_fence_wait)
@@ -342,7 +342,7 @@ void CommandBufferManager::SubmitCommandBuffer(size_t index, VkSemaphore wait_se
if (present_swap_chain != VK_NULL_HANDLE)
{
// Should have a signal semaphore.
_assert_(signal_semaphore != VK_NULL_HANDLE);
ASSERT(signal_semaphore != VK_NULL_HANDLE);
VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
nullptr,
1,
@@ -489,14 +489,14 @@ void CommandBufferManager::AddFencePointCallback(
const CommandBufferExecutedCallback& executed_callback)
{
// Shouldn't be adding twice.
_assert_(m_fence_point_callbacks.find(key) == m_fence_point_callbacks.end());
ASSERT(m_fence_point_callbacks.find(key) == m_fence_point_callbacks.end());
m_fence_point_callbacks.emplace(key, std::make_pair(queued_callback, executed_callback));
}

void CommandBufferManager::RemoveFencePointCallback(const void* key)
{
auto iter = m_fence_point_callbacks.find(key);
_assert_(iter != m_fence_point_callbacks.end());
ASSERT(iter != m_fence_point_callbacks.end());
m_fence_point_callbacks.erase(iter);
}

@@ -393,9 +393,9 @@ Texture2D* FramebufferManager::ResolveEFBColorTexture(const VkRect2D& region)

// It's not valid to resolve out-of-bounds coordinates.
// Ensuring the region is within the image is the caller's responsibility.
_assert_(region.offset.x >= 0 && region.offset.y >= 0 &&
(static_cast<u32>(region.offset.x) + region.extent.width) <= GetEFBWidth() &&
(static_cast<u32>(region.offset.y) + region.extent.height) <= GetEFBHeight());
ASSERT(region.offset.x >= 0 && region.offset.y >= 0 &&
(static_cast<u32>(region.offset.x) + region.extent.width) <= GetEFBWidth() &&
(static_cast<u32>(region.offset.y) + region.extent.height) <= GetEFBHeight());

// Resolving is considered to be a transfer operation.
m_efb_color_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
@@ -76,7 +76,7 @@ void PerfQuery::EnableQuery(PerfQueryGroup type)
{
u32 index = (m_query_read_pos + m_query_count) % PERF_QUERY_BUFFER_SIZE;
ActiveQuery& entry = m_query_buffer[index];
_assert_(!entry.active && !entry.available);
ASSERT(!entry.active && !entry.available);
entry.active = true;
m_query_count++;

@@ -245,12 +245,12 @@ void PerfQuery::OnCommandBufferQueued(VkCommandBuffer command_buffer, VkFence fe
if (entry.available)
{
// These should be grouped together, and at the start.
_assert_(copy_count == 0);
ASSERT(copy_count == 0);
continue;
}

// If this wrapped around, we need to flush the entries before the end of the buffer.
_assert_(entry.active);
ASSERT(entry.active);
if (index < copy_start_index)
{
QueueCopyQueryResults(command_buffer, fence, copy_start_index, copy_count);
@@ -311,7 +311,7 @@ void PerfQuery::ProcessResults(u32 start_index, u32 query_count)
query_count * sizeof(PerfQueryDataType));

// Should be at maximum query_count queries pending.
_assert_(query_count <= m_query_count);
ASSERT(query_count <= m_query_count);
DEBUG_LOG(VIDEO, "process queries %u-%u", start_index, start_index + query_count - 1);

// Remove pending queries.
@@ -321,7 +321,7 @@ void PerfQuery::ProcessResults(u32 start_index, u32 query_count)
ActiveQuery& entry = m_query_buffer[index];

// Should have a fence associated with it (waiting for a result).
_assert_(entry.pending_fence != VK_NULL_HANDLE);
ASSERT(entry.pending_fence != VK_NULL_HANDLE);
entry.pending_fence = VK_NULL_HANDLE;
entry.available = false;
entry.active = false;
@@ -117,13 +117,13 @@ void VulkanPostProcessing::FillUniformBuffer(u8* buf, const TargetRectangle& src
break;

case PostProcessingShaderConfiguration::ConfigurationOption::OptionType::OPTION_INTEGER:
_assert_(it.second.m_integer_values.size() < 4);
ASSERT(it.second.m_integer_values.size() < 4);
std::copy_n(it.second.m_integer_values.begin(), it.second.m_integer_values.size(),
value.as_int);
break;

case PostProcessingShaderConfiguration::ConfigurationOption::OptionType::OPTION_FLOAT:
_assert_(it.second.m_float_values.size() < 4);
ASSERT(it.second.m_float_values.size() < 4);
std::copy_n(it.second.m_float_values.begin(), it.second.m_float_values.size(),
value.as_float);
break;
@@ -1123,7 +1123,7 @@ void Renderer::SetTexture(u32 index, const AbstractTexture* texture)
// Texture should always be in SHADER_READ_ONLY layout prior to use.
// This is so we don't need to transition during render passes.
auto* tex = texture ? static_cast<const VKTexture*>(texture)->GetRawTexIdentifier() : nullptr;
_dbg_assert_(VIDEO, !tex || tex->GetLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
DEBUG_ASSERT(VIDEO, !tex || tex->GetLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
StateTracker::GetInstance()->SetTexture(index, tex ? tex->GetView() : VK_NULL_HANDLE);
}

@@ -38,8 +38,8 @@ bool StagingBuffer::Map(VkDeviceSize offset, VkDeviceSize size)
else
m_map_size = size;

_assert_(!m_map_pointer);
_assert_(m_map_offset + m_map_size <= m_size);
ASSERT(!m_map_pointer);
ASSERT(m_map_offset + m_map_size <= m_size);

void* map_pointer;
VkResult res = vkMapMemory(g_vulkan_context->GetDevice(), m_memory, m_map_offset, m_map_size, 0,
@@ -56,7 +56,7 @@ bool StagingBuffer::Map(VkDeviceSize offset, VkDeviceSize size)

void StagingBuffer::Unmap()
{
_assert_(m_map_pointer);
ASSERT(m_map_pointer);

vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
m_map_pointer = nullptr;
@@ -66,7 +66,7 @@ void StagingBuffer::Unmap()

void StagingBuffer::FlushCPUCache(VkDeviceSize offset, VkDeviceSize size)
{
_assert_(offset >= m_map_offset);
ASSERT(offset >= m_map_offset);
if (m_coherent)
return;

@@ -83,7 +83,7 @@ void StagingBuffer::InvalidateGPUCache(VkCommandBuffer command_buffer,
if (m_coherent)
return;

_assert_((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
ASSERT((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
Util::BufferMemoryBarrier(command_buffer, m_buffer, VK_ACCESS_HOST_WRITE_BIT, dest_access_flags,
offset, size, VK_PIPELINE_STAGE_HOST_BIT, dest_pipeline_stage);
}
@@ -96,7 +96,7 @@ void StagingBuffer::PrepareForGPUWrite(VkCommandBuffer command_buffer,
if (m_coherent)
return;

_assert_((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
ASSERT((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
Util::BufferMemoryBarrier(command_buffer, m_buffer, 0, dst_access_flags, offset, size,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, dst_pipeline_stage);
}
@@ -108,14 +108,14 @@ void StagingBuffer::FlushGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBi
if (m_coherent)
return;

_assert_((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
ASSERT((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
Util::BufferMemoryBarrier(command_buffer, m_buffer, src_access_flags, VK_ACCESS_HOST_READ_BIT,
offset, size, src_pipeline_stage, VK_PIPELINE_STAGE_HOST_BIT);
}

void StagingBuffer::InvalidateCPUCache(VkDeviceSize offset, VkDeviceSize size)
{
_assert_(offset >= m_map_offset);
ASSERT(offset >= m_map_offset);
if (m_coherent)
return;

@@ -126,8 +126,8 @@ void StagingBuffer::InvalidateCPUCache(VkDeviceSize offset, VkDeviceSize size)

void StagingBuffer::Read(VkDeviceSize offset, void* data, size_t size, bool invalidate_caches)
{
_assert_((offset + size) <= m_size);
_assert_(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset)));
ASSERT((offset + size) <= m_size);
ASSERT(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset)));
if (invalidate_caches)
InvalidateCPUCache(offset, size);

@@ -137,8 +137,8 @@ void StagingBuffer::Read(VkDeviceSize offset, void* data, size_t size, bool inva
void StagingBuffer::Write(VkDeviceSize offset, const void* data, size_t size,
bool invalidate_caches)
{
_assert_((offset + size) <= m_size);
_assert_(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset)));
ASSERT((offset + size) <= m_size);
ASSERT(offset >= m_map_offset && size <= (m_map_size + (offset - m_map_offset)));

memcpy(m_map_pointer + (offset - m_map_offset), data, size);
if (invalidate_caches)
@@ -37,7 +37,7 @@ StateTracker* StateTracker::GetInstance()

bool StateTracker::CreateInstance()
{
_assert_(!s_state_tracker);
ASSERT(!s_state_tracker);
s_state_tracker = std::make_unique<StateTracker>();
if (!s_state_tracker->Initialize())
{
@@ -116,15 +116,15 @@ void StateTracker::SetIndexBuffer(VkBuffer buffer, VkDeviceSize offset, VkIndexT
void StateTracker::SetRenderPass(VkRenderPass load_render_pass, VkRenderPass clear_render_pass)
{
// Should not be changed within a render pass.
_assert_(!InRenderPass());
ASSERT(!InRenderPass());
m_load_render_pass = load_render_pass;
m_clear_render_pass = clear_render_pass;
}

void StateTracker::SetFramebuffer(VkFramebuffer framebuffer, const VkRect2D& render_area)
{
// Should not be changed within a render pass.
_assert_(!InRenderPass());
ASSERT(!InRenderPass());
m_framebuffer = framebuffer;
m_framebuffer_size = render_area;
}
@@ -395,7 +395,7 @@ void StateTracker::EndRenderPass()
void StateTracker::BeginClearRenderPass(const VkRect2D& area, const VkClearValue* clear_values,
u32 num_clear_values)
{
_assert_(!InRenderPass());
ASSERT(!InRenderPass());

m_current_render_pass = m_clear_render_pass;
m_framebuffer_render_area = area;
@@ -209,8 +209,8 @@ bool StreamBuffer::ReserveMemory(size_t num_bytes, size_t alignment, bool allow_
// Can we find a fence to wait on that will give us enough memory?
if (allow_reuse && WaitForClearSpace(required_bytes))
{
_assert_(m_current_offset == m_current_gpu_position ||
(m_current_offset + required_bytes) < m_current_gpu_position);
ASSERT(m_current_offset == m_current_gpu_position ||
(m_current_offset + required_bytes) < m_current_gpu_position);
m_current_offset = Util::AlignBufferOffset(m_current_offset, alignment);
m_last_allocation_size = num_bytes;
return true;
@@ -232,8 +232,8 @@ bool StreamBuffer::ReserveMemory(size_t num_bytes, size_t alignment, bool allow_

void StreamBuffer::CommitMemory(size_t final_num_bytes)
{
_assert_((m_current_offset + final_num_bytes) <= m_current_size);
_assert_(final_num_bytes <= m_last_allocation_size);
ASSERT((m_current_offset + final_num_bytes) <= m_current_size);
ASSERT(final_num_bytes <= m_last_allocation_size);

// For non-coherent mappings, flush the memory range
if (!m_coherent_mapping)
@@ -155,7 +155,7 @@ bool SwapChain::SelectSurfaceFormat()
std::vector<VkSurfaceFormatKHR> surface_formats(format_count);
res = vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface,
&format_count, surface_formats.data());
_assert_(res == VK_SUCCESS);
ASSERT(res == VK_SUCCESS);

// If there is a single undefined surface format, the device doesn't care, so we'll just use RGBA
if (surface_formats[0].format == VK_FORMAT_UNDEFINED)
@@ -189,7 +189,7 @@ bool SwapChain::SelectPresentMode()
std::vector<VkPresentModeKHR> present_modes(mode_count);
res = vkGetPhysicalDeviceSurfacePresentModesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface,
&mode_count, present_modes.data());
_assert_(res == VK_SUCCESS);
ASSERT(res == VK_SUCCESS);

// Checks if a particular mode is supported, if it is, returns that mode.
auto CheckForMode = [&present_modes](VkPresentModeKHR check_mode) {
@@ -341,7 +341,7 @@ bool SwapChain::CreateSwapChain()

bool SwapChain::SetupSwapChainImages()
{
_assert_(m_swap_chain_images.empty());
ASSERT(m_swap_chain_images.empty());

uint32_t image_count;
VkResult res =
@@ -355,7 +355,7 @@ bool SwapChain::SetupSwapChainImages()
std::vector<VkImage> images(image_count);
res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count,
images.data());
_assert_(res == VK_SUCCESS);
ASSERT(res == VK_SUCCESS);

m_swap_chain_images.reserve(image_count);
for (uint32_t i = 0; i < image_count; i++)
@@ -302,7 +302,7 @@ void Texture2D::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout

void Texture2D::TransitionToLayout(VkCommandBuffer command_buffer, ComputeImageLayout new_layout)
{
_assert_(new_layout != ComputeImageLayout::Undefined);
ASSERT(new_layout != ComputeImageLayout::Undefined);
if (m_compute_layout == new_layout)
return;

@@ -222,7 +222,7 @@ void TextureCache::CopyEFBToCacheEntry(TCacheEntry* entry, bool is_depth_copy,
framebuffer_mgr->FlushEFBPokes();

// Has to be flagged as a render target.
_assert_(texture->GetFramebuffer() != VK_NULL_HANDLE);
ASSERT(texture->GetFramebuffer() != VK_NULL_HANDLE);

// Can't be done in a render pass, since we're doing our own render pass!
VkCommandBuffer command_buffer = g_command_buffer_mgr->GetCurrentCommandBuffer();
@@ -158,8 +158,8 @@ void TextureConverter::ConvertTexture(TextureCacheBase::TCacheEntry* dst_entry,
VKTexture* source_texture = static_cast<VKTexture*>(src_entry->texture.get());
VKTexture* destination_texture = static_cast<VKTexture*>(dst_entry->texture.get());

_assert_(static_cast<size_t>(palette_format) < NUM_PALETTE_CONVERSION_SHADERS);
_assert_(destination_texture->GetConfig().rendertarget);
ASSERT(static_cast<size_t>(palette_format) < NUM_PALETTE_CONVERSION_SHADERS);
ASSERT(destination_texture->GetConfig().rendertarget);

// We want to align to 2 bytes (R16) or the device's texel buffer alignment, whichever is greater.
size_t palette_size = src_entry->format == TextureFormat::I4 ? 32 : 512;
@@ -397,7 +397,7 @@ void UtilityShaderDraw::CommitPSUniforms(size_t size)

void UtilityShaderDraw::SetPushConstants(const void* data, size_t data_size)
{
_assert_(static_cast<u32>(data_size) < PUSH_CONSTANT_BUFFER_SIZE);
ASSERT(static_cast<u32>(data_size) < PUSH_CONSTANT_BUFFER_SIZE);

vkCmdPushConstants(m_command_buffer, m_pipeline_info.pipeline_layout,
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0,
@@ -414,8 +414,8 @@ void UtilityShaderDraw::SetPSSampler(size_t index, VkImageView view, VkSampler s
void UtilityShaderDraw::SetPSTexelBuffer(VkBufferView view)
{
// Should only be used with the texture conversion pipeline layout.
_assert_(m_pipeline_info.pipeline_layout ==
g_object_cache->GetPipelineLayout(PIPELINE_LAYOUT_TEXTURE_CONVERSION));
ASSERT(m_pipeline_info.pipeline_layout ==
g_object_cache->GetPipelineLayout(PIPELINE_LAYOUT_TEXTURE_CONVERSION));

m_ps_texel_buffer = view;
}
@@ -765,7 +765,7 @@ void ComputeShaderDispatcher::CommitUniformBuffer(size_t size)

void ComputeShaderDispatcher::SetPushConstants(const void* data, size_t data_size)
{
_assert_(static_cast<u32>(data_size) < PUSH_CONSTANT_BUFFER_SIZE);
ASSERT(static_cast<u32>(data_size) < PUSH_CONSTANT_BUFFER_SIZE);

vkCmdPushConstants(m_command_buffer, m_pipeline_info.pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT,
0, static_cast<u32>(data_size), data);
@@ -27,7 +27,7 @@ VKPipeline::~VKPipeline()

std::unique_ptr<VKPipeline> VKPipeline::Create(const AbstractPipelineConfig& config)
{
_dbg_assert_(VIDEO, config.vertex_shader && config.pixel_shader);
DEBUG_ASSERT(VIDEO, config.vertex_shader && config.pixel_shader);

// Get render pass for config.
VkRenderPass render_pass = g_object_cache->GetRenderPass(
@@ -34,7 +34,7 @@ VKShader::~VKShader()

bool VKShader::HasBinary() const
{
_assert_(!m_spv.empty());
ASSERT(!m_spv.empty());
return true;
}

@@ -131,13 +131,13 @@ void VKTexture::CopyRectangleFromTexture(const AbstractTexture* src,
{
Texture2D* src_texture = static_cast<const VKTexture*>(src)->GetRawTexIdentifier();

_assert_msg_(VIDEO, static_cast<u32>(src_rect.GetWidth()) <= src_texture->GetWidth() &&
static_cast<u32>(src_rect.GetHeight()) <= src_texture->GetHeight(),
"Source rect is too large for CopyRectangleFromTexture");
ASSERT_MSG(VIDEO, static_cast<u32>(src_rect.GetWidth()) <= src_texture->GetWidth() &&
static_cast<u32>(src_rect.GetHeight()) <= src_texture->GetHeight(),
"Source rect is too large for CopyRectangleFromTexture");

_assert_msg_(VIDEO, static_cast<u32>(dst_rect.GetWidth()) <= m_config.width &&
static_cast<u32>(dst_rect.GetHeight()) <= m_config.height,
"Dest rect is too large for CopyRectangleFromTexture");
ASSERT_MSG(VIDEO, static_cast<u32>(dst_rect.GetWidth()) <= m_config.width &&
static_cast<u32>(dst_rect.GetHeight()) <= m_config.height,
"Dest rect is too large for CopyRectangleFromTexture");

VkImageCopy image_copy = {
{VK_IMAGE_ASPECT_COLOR_BIT, src_level, src_layer, src_texture->GetLayers()},
@@ -176,8 +176,8 @@ void VKTexture::ScaleRectangleFromTexture(const AbstractTexture* source,
StateTracker::GetInstance()->SetPendingRebind();

// Can't render to a non-rendertarget (no framebuffer).
_assert_msg_(VIDEO, m_config.rendertarget,
"Destination texture for partial copy is not a rendertarget");
ASSERT_MSG(VIDEO, m_config.rendertarget,
"Destination texture for partial copy is not a rendertarget");

// Render pass expects dst_texture to be in COLOR_ATTACHMENT_OPTIMAL state.
// src_texture should already be in SHADER_READ_ONLY state, but transition in case (XFB).
@@ -216,10 +216,10 @@ void VKTexture::ResolveFromTexture(const AbstractTexture* src, const MathUtil::R
u32 layer, u32 level)
{
const VKTexture* srcentry = static_cast<const VKTexture*>(src);
_dbg_assert_(VIDEO, m_config.samples == 1 && m_config.width == srcentry->m_config.width &&
DEBUG_ASSERT(VIDEO, m_config.samples == 1 && m_config.width == srcentry->m_config.width &&
m_config.height == srcentry->m_config.height &&
srcentry->m_config.samples > 1);
_dbg_assert_(VIDEO,
DEBUG_ASSERT(VIDEO,
rect.left + rect.GetWidth() <= static_cast<int>(srcentry->m_config.width) &&
rect.top + rect.GetHeight() <= static_cast<int>(srcentry->m_config.height));

@@ -407,13 +407,13 @@ void VKStagingTexture::CopyFromTexture(const AbstractTexture* src,
const MathUtil::Rectangle<int>& src_rect, u32 src_layer,
u32 src_level, const MathUtil::Rectangle<int>& dst_rect)
{
_assert_(m_type == StagingTextureType::Readback);
_assert_(src_rect.GetWidth() == dst_rect.GetWidth() &&
src_rect.GetHeight() == dst_rect.GetHeight());
_assert_(src_rect.left >= 0 && static_cast<u32>(src_rect.right) <= src->GetConfig().width &&
src_rect.top >= 0 && static_cast<u32>(src_rect.bottom) <= src->GetConfig().height);
_assert_(dst_rect.left >= 0 && static_cast<u32>(dst_rect.right) <= m_config.width &&
dst_rect.top >= 0 && static_cast<u32>(dst_rect.bottom) <= m_config.height);
ASSERT(m_type == StagingTextureType::Readback);
ASSERT(src_rect.GetWidth() == dst_rect.GetWidth() &&
src_rect.GetHeight() == dst_rect.GetHeight());
ASSERT(src_rect.left >= 0 && static_cast<u32>(src_rect.right) <= src->GetConfig().width &&
src_rect.top >= 0 && static_cast<u32>(src_rect.bottom) <= src->GetConfig().height);
ASSERT(dst_rect.left >= 0 && static_cast<u32>(dst_rect.right) <= m_config.width &&
dst_rect.top >= 0 && static_cast<u32>(dst_rect.bottom) <= m_config.height);

Texture2D* src_tex = static_cast<const VKTexture*>(src)->GetRawTexIdentifier();
CopyFromTexture(src_tex, src_rect, src_layer, src_level, dst_rect);
@@ -458,7 +458,7 @@ void VKStagingTexture::CopyFromTexture(Texture2D* src, const MathUtil::Rectangle
m_needs_flush = true;
g_command_buffer_mgr->AddFencePointCallback(this,
[this](VkCommandBuffer buf, VkFence fence) {
_assert_(m_needs_flush);
ASSERT(m_needs_flush);
m_flush_fence = fence;
},
[this](VkFence fence) {
@@ -473,13 +473,13 @@ void VKStagingTexture::CopyToTexture(const MathUtil::Rectangle<int>& src_rect, A
const MathUtil::Rectangle<int>& dst_rect, u32 dst_layer,
u32 dst_level)
{
_assert_(m_type == StagingTextureType::Upload);
_assert_(src_rect.GetWidth() == dst_rect.GetWidth() &&
src_rect.GetHeight() == dst_rect.GetHeight());
_assert_(src_rect.left >= 0 && static_cast<u32>(src_rect.right) <= m_config.width &&
src_rect.top >= 0 && static_cast<u32>(src_rect.bottom) <= m_config.height);
_assert_(dst_rect.left >= 0 && static_cast<u32>(dst_rect.right) <= dst->GetConfig().width &&
dst_rect.top >= 0 && static_cast<u32>(dst_rect.bottom) <= dst->GetConfig().height);
ASSERT(m_type == StagingTextureType::Upload);
ASSERT(src_rect.GetWidth() == dst_rect.GetWidth() &&
src_rect.GetHeight() == dst_rect.GetHeight());
ASSERT(src_rect.left >= 0 && static_cast<u32>(src_rect.right) <= m_config.width &&
src_rect.top >= 0 && static_cast<u32>(src_rect.bottom) <= m_config.height);
ASSERT(dst_rect.left >= 0 && static_cast<u32>(dst_rect.right) <= dst->GetConfig().width &&
dst_rect.top >= 0 && static_cast<u32>(dst_rect.bottom) <= dst->GetConfig().height);

if (m_needs_flush)
{
@@ -518,7 +518,7 @@ void VKStagingTexture::CopyToTexture(const MathUtil::Rectangle<int>& src_rect, A
m_needs_flush = true;
g_command_buffer_mgr->AddFencePointCallback(this,
[this](VkCommandBuffer buf, VkFence fence) {
_assert_(m_needs_flush);
ASSERT(m_needs_flush);
m_flush_fence = fence;
},
[this](VkFence fence) {
@@ -42,7 +42,7 @@ static VkFormat VarToVkFormat(VarType t, uint32_t components, bool integer)
VK_FORMAT_R32G32B32A32_SFLOAT} // VAR_FLOAT
};

_assert_(components > 0 && components <= 4);
ASSERT(components > 0 && components <= 4);
return integer ? integer_type_lookup[t][components - 1] : float_type_lookup[t][components - 1];
}

@@ -120,7 +120,7 @@ void VertexFormat::SetupInputState()
void VertexFormat::AddAttribute(uint32_t location, uint32_t binding, VkFormat format,
uint32_t offset)
{
_assert_(m_num_attributes < MAX_VERTEX_ATTRIBUTES);
ASSERT(m_num_attributes < MAX_VERTEX_ATTRIBUTES);

m_attribute_descriptions[m_num_attributes].location = location;
m_attribute_descriptions[m_num_attributes].binding = binding;
@@ -59,7 +59,7 @@ bool VulkanContext::CheckValidationLayerAvailablility()

std::vector<VkExtensionProperties> extension_list(extension_count);
res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, extension_list.data());
_assert_(res == VK_SUCCESS);
ASSERT(res == VK_SUCCESS);

u32 layer_count = 0;
res = vkEnumerateInstanceLayerProperties(&layer_count, nullptr);
@@ -71,7 +71,7 @@ bool VulkanContext::CheckValidationLayerAvailablility()

std::vector<VkLayerProperties> layer_list(layer_count);
res = vkEnumerateInstanceLayerProperties(&layer_count, layer_list.data());
_assert_(res == VK_SUCCESS);
ASSERT(res == VK_SUCCESS);

// Check for both VK_EXT_debug_report and VK_LAYER_LUNARG_standard_validation
return (std::find_if(extension_list.begin(), extension_list.end(),
@@ -148,7 +148,7 @@ bool VulkanContext::SelectInstanceExtensions(ExtensionList* extension_list, bool
std::vector<VkExtensionProperties> available_extension_list(extension_count);
res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count,
available_extension_list.data());
_assert_(res == VK_SUCCESS);
ASSERT(res == VK_SUCCESS);

for (const auto& extension_properties : available_extension_list)
INFO_LOG(VIDEO, "Available extension: %s", extension_properties.extensionName);
@@ -391,7 +391,7 @@ bool VulkanContext::SelectDeviceExtensions(ExtensionList* extension_list, bool e
std::vector<VkExtensionProperties> available_extension_list(extension_count);
res = vkEnumerateDeviceExtensionProperties(m_physical_device, nullptr, &extension_count,
available_extension_list.data());
_assert_(res == VK_SUCCESS);
ASSERT(res == VK_SUCCESS);

for (const auto& extension_properties : available_extension_list)
INFO_LOG(VIDEO, "Available extension: %s", extension_properties.extensionName);
@@ -35,12 +35,12 @@ void AbstractStagingTexture::CopyToTexture(AbstractTexture* dst, u32 dst_layer,
void AbstractStagingTexture::ReadTexels(const MathUtil::Rectangle<int>& rect, void* out_ptr,
u32 out_stride)
{
_assert_(m_type != StagingTextureType::Upload);
ASSERT(m_type != StagingTextureType::Upload);
if (!PrepareForAccess())
return;

_assert_(rect.left >= 0 && static_cast<u32>(rect.right) <= m_config.width && rect.top >= 0 &&
static_cast<u32>(rect.bottom) <= m_config.height);
ASSERT(rect.left >= 0 && static_cast<u32>(rect.right) <= m_config.width && rect.top >= 0 &&
static_cast<u32>(rect.bottom) <= m_config.height);

// Offset pointer to point to start of region being copied out.
const char* current_ptr = m_map_pointer;
@@ -68,24 +68,24 @@ void AbstractStagingTexture::ReadTexels(const MathUtil::Rectangle<int>& rect, vo

void AbstractStagingTexture::ReadTexel(u32 x, u32 y, void* out_ptr)
{
_assert_(m_type != StagingTextureType::Upload);
ASSERT(m_type != StagingTextureType::Upload);
if (!PrepareForAccess())
return;

_assert_(x < m_config.width && y < m_config.height);
ASSERT(x < m_config.width && y < m_config.height);
const char* src_ptr = m_map_pointer + y * m_map_stride + x * m_texel_size;
std::memcpy(out_ptr, src_ptr, m_texel_size);
}

void AbstractStagingTexture::WriteTexels(const MathUtil::Rectangle<int>& rect, const void* in_ptr,
u32 in_stride)
{
_assert_(m_type != StagingTextureType::Readback);
ASSERT(m_type != StagingTextureType::Readback);
if (!PrepareForAccess())
return;

_assert_(rect.left >= 0 && static_cast<u32>(rect.right) <= m_config.width && rect.top >= 0 &&
static_cast<u32>(rect.bottom) <= m_config.height);
ASSERT(rect.left >= 0 && static_cast<u32>(rect.right) <= m_config.width && rect.top >= 0 &&
static_cast<u32>(rect.bottom) <= m_config.height);

// Offset pointer to point to start of region being copied to.
char* current_ptr = m_map_pointer;
@@ -112,11 +112,11 @@ void AbstractStagingTexture::WriteTexels(const MathUtil::Rectangle<int>& rect, c

void AbstractStagingTexture::WriteTexel(u32 x, u32 y, const void* in_ptr)
{
_assert_(m_type != StagingTextureType::Readback);
ASSERT(m_type != StagingTextureType::Readback);
if (!PrepareForAccess())
return;

_assert_(x < m_config.width && y < m_config.height);
ASSERT(x < m_config.width && y < m_config.height);
char* dest_ptr = m_map_pointer + y * m_map_stride + x * m_texel_size;
std::memcpy(dest_ptr, in_ptr, m_texel_size);
}
@@ -20,8 +20,8 @@ bool AbstractTexture::Save(const std::string& filename, unsigned int level)
// We can't dump compressed textures currently (it would mean drawing them to a RGBA8
// framebuffer, and saving that). TextureCache does not call Save for custom textures
// anyway, so this is fine for now.
_assert_(!IsCompressedFormat(m_config.format));
_assert_(level < m_config.levels);
ASSERT(!IsCompressedFormat(m_config.format));
ASSERT(level < m_config.levels);

// Determine dimensions of image we want to save.
u32 level_width = std::max(1u, m_config.width >> level);
@@ -17,7 +17,7 @@ AsyncShaderCompiler::~AsyncShaderCompiler()
{
// Pending work can be left at shutdown.
// The work item classes are expected to clean up after themselves.
_assert_(!HasWorkerThreads());
ASSERT(!HasWorkerThreads());
}

void AsyncShaderCompiler::QueueWorkItem(WorkItemPtr item)
@@ -333,16 +333,16 @@ void GatherPipeBursted()

Fifo::RunGpu();

_assert_msg_(COMMANDPROCESSOR, fifo.CPReadWriteDistance <= fifo.CPEnd - fifo.CPBase,
"FIFO is overflowed by GatherPipe !\nCPU thread is too fast!");
ASSERT_MSG(COMMANDPROCESSOR, fifo.CPReadWriteDistance <= fifo.CPEnd - fifo.CPBase,
"FIFO is overflowed by GatherPipe !\nCPU thread is too fast!");

// check if we are in sync
_assert_msg_(COMMANDPROCESSOR, fifo.CPWritePointer == ProcessorInterface::Fifo_CPUWritePointer,
"FIFOs linked but out of sync");
_assert_msg_(COMMANDPROCESSOR, fifo.CPBase == ProcessorInterface::Fifo_CPUBase,
"FIFOs linked but out of sync");
_assert_msg_(COMMANDPROCESSOR, fifo.CPEnd == ProcessorInterface::Fifo_CPUEnd,
"FIFOs linked but out of sync");
ASSERT_MSG(COMMANDPROCESSOR, fifo.CPWritePointer == ProcessorInterface::Fifo_CPUWritePointer,
"FIFOs linked but out of sync");
ASSERT_MSG(COMMANDPROCESSOR, fifo.CPBase == ProcessorInterface::Fifo_CPUBase,
"FIFOs linked but out of sync");
ASSERT_MSG(COMMANDPROCESSOR, fifo.CPEnd == ProcessorInterface::Fifo_CPUEnd,
"FIFOs linked but out of sync");
}

void UpdateInterrupts(u64 userdata)
@@ -342,10 +342,10 @@ void RunGpuLoop()
else
readPtr += 32;

_assert_msg_(COMMANDPROCESSOR, (s32)fifo.CPReadWriteDistance - 32 >= 0,
"Negative fifo.CPReadWriteDistance = %i in FIFO Loop !\nThat can produce "
"instability in the game. Please report it.",
fifo.CPReadWriteDistance - 32);
ASSERT_MSG(COMMANDPROCESSOR, (s32)fifo.CPReadWriteDistance - 32 >= 0,
"Negative fifo.CPReadWriteDistance = %i in FIFO Loop !\nThat can produce "
"instability in the game. Please report it.",
fifo.CPReadWriteDistance - 32);

u8* write_ptr = s_video_buffer_write_ptr;
s_video_buffer_read_ptr = OpcodeDecoder::Run(
@@ -67,7 +67,7 @@ static void GenerateLightShader(ShaderCode& object, const LightingUidData& uid_d
swizzle_components, LIGHT_COL_PARAMS(index, swizzle));
break;
default:
_assert_(0);
ASSERT(0);
}

object.Write("\n");
@@ -947,7 +947,7 @@ static void WriteStage(ShaderCode& out, const pixel_shader_uid_data* uid_data, i
}
else if (tevind.mid <= 7 && bHasTexCoord)
{ // s matrix
_assert_(tevind.mid >= 5);
ASSERT(tevind.mid >= 5);
int mtxidx = 2 * (tevind.mid - 5);
out.SetConstantsUsed(C_INDTEXMTX + mtxidx, C_INDTEXMTX + mtxidx);

@@ -969,7 +969,7 @@ static void WriteStage(ShaderCode& out, const pixel_shader_uid_data* uid_data, i
}
else if (tevind.mid <= 11 && bHasTexCoord)
{ // t matrix
_assert_(tevind.mid >= 9);
ASSERT(tevind.mid >= 9);
int mtxidx = 2 * (tevind.mid - 9);
out.SetConstantsUsed(C_INDTEXMTX + mtxidx, C_INDTEXMTX + mtxidx);

@@ -761,7 +761,7 @@ void Renderer::RenderFrameDump()
TextureConfig config(target_width, target_height, 1, 1, 1, AbstractTextureFormat::RGBA8, true);
m_frame_dump_render_texture.reset();
m_frame_dump_render_texture = CreateTexture(config);
_assert_(m_frame_dump_render_texture);
ASSERT(m_frame_dump_render_texture);
}

// Scaling is likely to occur here, but if possible, do a bit-for-bit copy.
@@ -1971,7 +1971,7 @@ void TextureCacheBase::TCacheEntry::SetXfbCopy(u32 stride)
is_xfb_copy = true;
memory_stride = stride;

_assert_msg_(VIDEO, memory_stride >= BytesPerRow(), "Memory stride is too small");
ASSERT_MSG(VIDEO, memory_stride >= BytesPerRow(), "Memory stride is too small");

size_in_bytes = memory_stride * NumBlocksY();
}
@@ -1982,7 +1982,7 @@ void TextureCacheBase::TCacheEntry::SetEfbCopy(u32 stride)
is_xfb_copy = false;
memory_stride = stride;

_assert_msg_(VIDEO, memory_stride >= BytesPerRow(), "Memory stride is too small");
ASSERT_MSG(VIDEO, memory_stride >= BytesPerRow(), "Memory stride is too small");

size_in_bytes = memory_stride * NumBlocksY();
}
@@ -244,7 +244,7 @@ void VertexLoader::CompileVertexTranslator()
WriteCall(Color_ReadDirect_32b_8888);
break;
default:
_assert_(0);
ASSERT(0);
break;
}
break;
@@ -271,7 +271,7 @@ void VertexLoader::CompileVertexTranslator()
WriteCall(Color_ReadIndex8_32b_8888);
break;
default:
_assert_(0);
ASSERT(0);
break;
}
break;
@@ -298,7 +298,7 @@ void VertexLoader::CompileVertexTranslator()
WriteCall(Color_ReadIndex16_32b_8888);
break;
default:
_assert_(0);
ASSERT(0);
break;
}
break;
@@ -325,12 +325,12 @@ void VertexLoader::CompileVertexTranslator()

if (tc[i] != NOT_PRESENT)
{
_assert_msg_(VIDEO, DIRECT <= tc[i] && tc[i] <= INDEX16,
"Invalid texture coordinates!\n(tc[i] = %d)", (u32)tc[i]);
_assert_msg_(VIDEO, FORMAT_UBYTE <= format && format <= FORMAT_FLOAT,
"Invalid texture coordinates format!\n(format = %d)", format);
_assert_msg_(VIDEO, 0 <= elements && elements <= 1,
"Invalid number of texture coordinates elements!\n(elements = %d)", elements);
ASSERT_MSG(VIDEO, DIRECT <= tc[i] && tc[i] <= INDEX16,
"Invalid texture coordinates!\n(tc[i] = %d)", (u32)tc[i]);
ASSERT_MSG(VIDEO, FORMAT_UBYTE <= format && format <= FORMAT_FLOAT,
"Invalid texture coordinates format!\n(format = %d)", format);
ASSERT_MSG(VIDEO, 0 <= elements && elements <= 1,
"Invalid number of texture coordinates elements!\n(elements = %d)", elements);

components |= VB_HAS_UV0 << i;
WriteCall(VertexLoader_TextCoord::GetFunction(tc[i], format, elements));
@@ -329,19 +329,19 @@ void LoadCPReg(u32 sub_cmd, u32 value, bool is_preprocess)
break;

case 0x70:
_assert_((sub_cmd & 0x0F) < 8);
ASSERT((sub_cmd & 0x0F) < 8);
state->vtx_attr[sub_cmd & 7].g0.Hex = value;
state->attr_dirty[sub_cmd & 7] = true;
break;

case 0x80:
_assert_((sub_cmd & 0x0F) < 8);
ASSERT((sub_cmd & 0x0F) < 8);
state->vtx_attr[sub_cmd & 7].g1.Hex = value;
state->attr_dirty[sub_cmd & 7] = true;
break;

case 0x90:
_assert_((sub_cmd & 0x0F) < 8);
ASSERT((sub_cmd & 0x0F) < 8);
state->vtx_attr[sub_cmd & 7].g2.Hex = value;
state->attr_dirty[sub_cmd & 7] = true;
break;
@@ -21,8 +21,8 @@ VertexShaderUid GetVertexShaderUid()
vertex_shader_uid_data* uid_data = out.GetUidData<vertex_shader_uid_data>();
memset(uid_data, 0, sizeof(*uid_data));

_assert_(bpmem.genMode.numtexgens == xfmem.numTexGen.numTexGens);
_assert_(bpmem.genMode.numcolchans == xfmem.numChan.numColorChans);
ASSERT(bpmem.genMode.numtexgens == xfmem.numTexGen.numTexGens);
ASSERT(bpmem.genMode.numcolchans == xfmem.numChan.numColorChans);

uid_data->numTexGens = xfmem.numTexGen.numTexGens;
uid_data->components = VertexLoaderManager::g_current_components;
@@ -262,8 +262,8 @@ ShaderCode GenerateVertexShaderCode(APIType api_type, const ShaderHostConfig& ho
}
break;
case XF_SRCCOLORS_INROW:
_assert_(texinfo.texgentype == XF_TEXGEN_COLOR_STRGBC0 ||
texinfo.texgentype == XF_TEXGEN_COLOR_STRGBC1);
ASSERT(texinfo.texgentype == XF_TEXGEN_COLOR_STRGBC0 ||
texinfo.texgentype == XF_TEXGEN_COLOR_STRGBC1);
break;
case XF_SRCBINORMAL_T_INROW:
if (uid_data->components & VB_HAS_NRM1)
@@ -278,7 +278,7 @@ ShaderCode GenerateVertexShaderCode(APIType api_type, const ShaderHostConfig& ho
}
break;
default:
_assert_(texinfo.sourcerow <= XF_SRCTEX7_INROW);
ASSERT(texinfo.sourcerow <= XF_SRCTEX7_INROW);
if (uid_data->components & (VB_HAS_UV0 << (texinfo.sourcerow - XF_SRCTEX0_INROW)))
out.Write("coord = float4(rawtex%d.x, rawtex%d.y, 1.0, 1.0);\n",
texinfo.sourcerow - XF_SRCTEX0_INROW, texinfo.sourcerow - XF_SRCTEX0_INROW);
@@ -307,7 +307,7 @@ ShaderCode GenerateVertexShaderCode(APIType api_type, const ShaderHostConfig& ho
{
// The following assert was triggered in House of the Dead Overkill and Star Wars Rogue
// Squadron 2
//_assert_(0); // should have normals
// ASSERT(0); // should have normals
out.Write("o.tex%d.xyz = o.tex%d.xyz;\n", i, texinfo.embosssourceshift);
}

@@ -99,11 +99,9 @@ static void XFRegWritten(int transferSize, u32 baseAddress, DataReader src)
break;

case XFMEM_SETMATRIXINDA:
//_assert_msg_(GX_XF, 0, "XF matrixindex0");
VertexShaderManager::SetTexMatrixChangedA(newValue);
break;
case XFMEM_SETMATRIXINDB:
//_assert_msg_(GX_XF, 0, "XF matrixindex1");
VertexShaderManager::SetTexMatrixChangedB(newValue);
break;
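
Every hunk above is a mechanical one-for-one rename of the old lowercase assertion macros to the new uppercase ones, with the argument lists left unchanged (`_assert_(cond)` → `ASSERT(cond)`, `_dbg_assert_(type, cond)` → `DEBUG_ASSERT(type, cond)`, `_assert_msg_(type, cond, fmt, ...)` → `ASSERT_MSG(type, cond, fmt, ...)`). A minimal sketch of that correspondence, written as hypothetical compatibility shims that are not part of this change, assuming the new macros are the ones provided by Common/Assert.h:

```cpp
// Hypothetical shims illustrating the rename performed in the hunks above.
// They are NOT part of this change; new code should call the new macros directly.
#define _assert_(cond) ASSERT(cond)
#define _dbg_assert_(type, cond) DEBUG_ASSERT(type, cond)
#define _assert_msg_(type, cond, ...) ASSERT_MSG(type, cond, __VA_ARGS__)
```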