Fix duplicated names if both D3D12 and Vulkan are enabled #258

Merged 1 commit on Feb 6, 2024
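Context for the rename (not part of the PR text itself): Remotery ships as a single source file, lib/Remotery.c, so when both the D3D12 and the Vulkan backends are compiled in, two static functions named CopyTimestamps (and likewise AllocQueryPair) end up in the same translation unit and the compiler rejects the redefinition. Below is a minimal standalone sketch of the collision and the rename fix; the RMT_USE_D3D12 / RMT_USE_VULKAN style switches are assumed to match Remotery's build options, and the function bodies are placeholders, not Remotery code.

#include <stdio.h>

#define RMT_USE_D3D12 1
#define RMT_USE_VULKAN 1

#if RMT_USE_D3D12
/* Before this PR both backends defined "static ... CopyTimestamps(...)",
   which is a redefinition error once both live in one translation unit.
   Giving each backend its own name resolves the clash: */
static const char* CopyD3D12Timestamps(void)  { return "d3d12 timestamps copied"; }
#endif

#if RMT_USE_VULKAN
static const char* CopyVulkanTimestamps(void) { return "vulkan timestamps copied"; }
#endif

int main(void)
{
    printf("%s\n%s\n", CopyD3D12Timestamps(), CopyVulkanTimestamps());
    return 0;
}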
24 changes: 12 additions & 12 deletions lib/Remotery.c
@@ -8786,7 +8786,7 @@ static rmtError CreateQueryFence(D3D12BindImpl* bind, ID3D12Device* d3d_device)
return RMT_ERROR_NONE;
}

-static rmtError CopyTimestamps(D3D12BindImpl* bind, rmtU32 ring_pos_a, rmtU32 ring_pos_b, double gpu_ticks_to_us, rmtS64 gpu_to_cpu_timestamp_us)
+static rmtError CopyD3D12Timestamps(D3D12BindImpl* bind, rmtU32 ring_pos_a, rmtU32 ring_pos_b, double gpu_ticks_to_us, rmtS64 gpu_to_cpu_timestamp_us)
{
rmtU32 query_index;
D3D12_RANGE map;
@@ -8872,12 +8872,12 @@ static rmtError D3D12MarkFrame(D3D12BindImpl* bind)
// Will have to split the copies into two passes if they cross the ring buffer wrap around
if (ring_pos_b < ring_pos_a)
{
-rmtTry(CopyTimestamps(bind, ring_pos_a, bind->maxNbQueries, gpu_ticks_to_us, gpu_to_cpu_timestamp_us));
-rmtTry(CopyTimestamps(bind, 0, ring_pos_b, gpu_ticks_to_us, gpu_to_cpu_timestamp_us));
+rmtTry(CopyD3D12Timestamps(bind, ring_pos_a, bind->maxNbQueries, gpu_ticks_to_us, gpu_to_cpu_timestamp_us));
+rmtTry(CopyD3D12Timestamps(bind, 0, ring_pos_b, gpu_ticks_to_us, gpu_to_cpu_timestamp_us));
}
else
{
-rmtTry(CopyTimestamps(bind, ring_pos_a, ring_pos_b, gpu_ticks_to_us, gpu_to_cpu_timestamp_us));
+rmtTry(CopyD3D12Timestamps(bind, ring_pos_a, ring_pos_b, gpu_ticks_to_us, gpu_to_cpu_timestamp_us));
}

// Release the ring buffer entries just processed
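The comment in this hunk explains why there are two calls: the query ring buffer is a linear array, so a readback range that wraps past maxNbQueries has to be issued as two contiguous copies, one up to the end of the buffer and one restarting at index 0. A standalone sketch of that split (illustrative names, not Remotery's API):

#include <stdio.h>

/* Illustrative only: split a [a, b) range that may wrap a ring of `capacity`
   slots into at most two contiguous copies, mirroring the two rmtTry calls. */
typedef void (*CopyRangeFn)(unsigned first, unsigned last);

static void CopyWrappedRange(unsigned ring_pos_a, unsigned ring_pos_b,
                             unsigned capacity, CopyRangeFn copy)
{
    if (ring_pos_b < ring_pos_a)
    {
        /* Range crosses the end of the buffer: copy the tail, then the head */
        copy(ring_pos_a, capacity);
        copy(0, ring_pos_b);
    }
    else
    {
        copy(ring_pos_a, ring_pos_b);
    }
}

static void PrintRange(unsigned first, unsigned last)
{
    printf("copy queries [%u, %u)\n", first, last);
}

int main(void)
{
    CopyWrappedRange(30, 10, 32, PrintRange); /* wraps: [30,32) then [0,10) */
    CopyWrappedRange( 4, 12, 32, PrintRange); /* contiguous: [4,12)         */
    return 0;
}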
@@ -9037,7 +9037,7 @@ static rmtError AllocateD3D12SampleTree(SampleTree** d3d_tree)
return RMT_ERROR_NONE;
}

-static rmtError AllocQueryPair(D3D12BindImpl* d3d_bind, rmtAtomicU32* out_allocation_index)
+static rmtError AllocD3D12QueryPair(D3D12BindImpl* d3d_bind, rmtAtomicU32* out_allocation_index)
{
// Check for overflow against a tail which is only ever written by one thread
rmtU32 read = LoadAcquire(&d3d_bind->ringBufferRead);
@@ -9091,7 +9091,7 @@ RMT_API void _rmt_BeginD3D12Sample(rmtD3D12Bind* bind, void* command_list, rmtPS
d3d_sample->commandList = d3d_command_list;
d3d_sample->base.usGpuIssueOnCpu = usTimer_Get(&g_Remotery->timer);

-error = AllocQueryPair(d3d_bind, &d3d_sample->queryIndex);
+error = AllocD3D12QueryPair(d3d_bind, &d3d_sample->queryIndex);
if (error == RMT_ERROR_NONE)
{
rmtU32 physical_query_index = d3d_sample->queryIndex & (d3d_bind->maxNbQueries - 1);
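Together with the previous hunk, this shows the pattern the renamed AllocD3D12QueryPair sits in: a monotonically growing allocation index is checked for overflow against a read position that only one thread advances, and the physical query slot is the index masked by the power-of-two capacity. A rough sketch of that pattern using C11 atomics in place of Remotery's LoadAcquire/atomic wrappers; the names and error handling here are illustrative, not Remotery's implementation:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative ring allocator sketch. capacity must be a power of two so
   that (index & (capacity - 1)) is a cheap modulo. Each allocation reserves
   one begin/end timestamp query pair. */
typedef struct
{
    unsigned capacity;     /* max number of query pairs, power of two       */
    atomic_uint write;     /* monotonically increasing allocation index     */
    atomic_uint read;      /* advanced only by the readback thread          */
} QueryRing;

/* Returns 0 and stores the logical allocation index, or -1 when the
   readback side has not caught up yet (the ring is full). */
static int AllocQueryPairSketch(QueryRing* ring, unsigned* out_index)
{
    unsigned read  = atomic_load_explicit(&ring->read, memory_order_acquire);
    unsigned write = atomic_load_explicit(&ring->write, memory_order_relaxed);
    if (write - read >= ring->capacity)
        return -1;         /* would overwrite queries not yet read back */

    *out_index = atomic_fetch_add_explicit(&ring->write, 1, memory_order_relaxed);
    return 0;
}

int main(void)
{
    QueryRing ring = { 8, 0, 0 };
    unsigned index;
    if (AllocQueryPairSketch(&ring, &index) == 0)
    {
        unsigned physical = index & (ring.capacity - 1); /* slot in the query heap */
        printf("logical %u -> physical %u\n", index, physical);
    }
    return 0;
}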
@@ -10160,7 +10160,7 @@ static rmtError CreateQuerySemaphore(VulkanBindImpl* bind, VkDevice vulkan_devic
return RMT_ERROR_NONE;
}

-static rmtError CopyTimestamps(VulkanBindImpl* bind, VkDevice vulkan_device, rmtU32 ring_pos_a, rmtU32 ring_pos_b, double gpu_ticks_to_us, rmtS64 gpu_to_cpu_timestamp_us)
+static rmtError CopyVulkanTimestamps(VulkanBindImpl* bind, VkDevice vulkan_device, rmtU32 ring_pos_a, rmtU32 ring_pos_b, double gpu_ticks_to_us, rmtS64 gpu_to_cpu_timestamp_us)
{
rmtU32 query_index;
VulkanSample** cpu_sample_buffer = bind->sampleRingBuffer;
@@ -10349,12 +10349,12 @@ static rmtError VulkanMarkFrame(VulkanBindImpl* bind, rmtBool recurse)
// Will have to split the copies into two passes if they cross the ring buffer wrap around
if (ring_pos_b < ring_pos_a)
{
-rmtTry(CopyTimestamps(bind, vulkan_device, ring_pos_a, bind->maxNbQueries, gpu_ticks_to_us, gpu_to_cpu_timestamp_us));
-rmtTry(CopyTimestamps(bind, vulkan_device, 0, ring_pos_b, gpu_ticks_to_us, gpu_to_cpu_timestamp_us));
+rmtTry(CopyVulkanTimestamps(bind, vulkan_device, ring_pos_a, bind->maxNbQueries, gpu_ticks_to_us, gpu_to_cpu_timestamp_us));
+rmtTry(CopyVulkanTimestamps(bind, vulkan_device, 0, ring_pos_b, gpu_ticks_to_us, gpu_to_cpu_timestamp_us));
}
else
{
-rmtTry(CopyTimestamps(bind, vulkan_device, ring_pos_a, ring_pos_b, gpu_ticks_to_us, gpu_to_cpu_timestamp_us));
+rmtTry(CopyVulkanTimestamps(bind, vulkan_device, ring_pos_a, ring_pos_b, gpu_ticks_to_us, gpu_to_cpu_timestamp_us));
}

// Release the ring buffer entries just processed
@@ -10561,7 +10561,7 @@ static rmtError AllocateVulkanSampleTree(SampleTree** vulkan_tree)
return RMT_ERROR_NONE;
}

-static rmtError AllocQueryPair(VulkanBindImpl* vulkan_bind, rmtU32* out_allocation_index)
+static rmtError AllocVulkanQueryPair(VulkanBindImpl* vulkan_bind, rmtU32* out_allocation_index)
{
// Check for overflow against a tail which is only ever written by one thread
rmtU64 read = LoadAcquire64(&vulkan_bind->ringBufferRead);
@@ -10616,7 +10616,7 @@ RMT_API void _rmt_BeginVulkanSample(rmtVulkanBind* bind, void* command_buffer, r
vulkan_sample->commandBuffer = vulkan_command_buffer;
vulkan_sample->base.usGpuIssueOnCpu = usTimer_Get(&g_Remotery->timer);

-error = AllocQueryPair(vulkan_bind, &vulkan_sample->queryIndex);
+error = AllocVulkanQueryPair(vulkan_bind, &vulkan_sample->queryIndex);
if (error == RMT_ERROR_NONE)
{
rmtU32 physical_query_index = vulkan_sample->queryIndex & (vulkan_bind->maxNbQueries - 1);