From 119747240b8d1bc481980b44ac7a0a2e8e0f5d79 Mon Sep 17 00:00:00 2001
From: nyorain
Date: Mon, 17 Jun 2024 00:35:15 +0200
Subject: [PATCH] Attempt to fix CI for tracy

---
 src/tracy/TracyClient.cpp           | 10 ++++++++++
 src/tracy/client/TracySysTrace.cpp  |  4 +++-
 src/tracy/client/tracy_rpmalloc.cpp | 14 +++++++-------
 3 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/src/tracy/TracyClient.cpp b/src/tracy/TracyClient.cpp
index 26387b76..37301bb0 100644
--- a/src/tracy/TracyClient.cpp
+++ b/src/tracy/TracyClient.cpp
@@ -19,6 +19,12 @@
 #  pragma warning(push, 0)
 #endif
 
+#ifdef __GNUC__
+	#pragma GCC diagnostic push
+	#pragma GCC diagnostic ignored "-Wunused-parameter"
+	#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+#endif // __GNUC__
+
 #include "common/tracy_lz4.cpp"
 #include "client/TracyProfiler.cpp"
 #include "client/TracyCallstack.cpp"
@@ -55,4 +61,8 @@
 #  pragma warning(pop)
 #endif
 
+#ifdef __GNUC__
+	#pragma GCC diagnostic pop
+#endif // __GNUC__
+
 #endif
diff --git a/src/tracy/client/TracySysTrace.cpp b/src/tracy/client/TracySysTrace.cpp
index af0641fe..f6584e85 100644
--- a/src/tracy/client/TracySysTrace.cpp
+++ b/src/tracy/client/TracySysTrace.cpp
@@ -1143,7 +1143,9 @@ void SysTraceWorker( void* ptr )
     SetThreadName( "Tracy Sampling" );
     InitRpmalloc();
     sched_param sp = { 99 };
-    if( pthread_setschedparam( pthread_self(), SCHED_FIFO, &sp ) != 0 ) TracyDebug( "Failed to increase SysTraceWorker thread priority!\n" );
+    if( pthread_setschedparam( pthread_self(), SCHED_FIFO, &sp ) != 0 ) {
+        TracyDebug( "Failed to increase SysTraceWorker thread priority!\n" );
+    }
     auto ctxBufferIdx = s_ctxBufferIdx;
     auto ringArray = s_ring;
     auto numBuffers = s_numBuffers;
diff --git a/src/tracy/client/tracy_rpmalloc.cpp b/src/tracy/client/tracy_rpmalloc.cpp
index 711505d2..360e97eb 100644
--- a/src/tracy/client/tracy_rpmalloc.cpp
+++ b/src/tracy/client/tracy_rpmalloc.cpp
@@ -699,7 +699,7 @@ static pthread_key_t _memory_thread_heap;
 #      define _Thread_local __thread
 #    endif
 #  endif
-static _Thread_local heap_t* _memory_thread_heap TLS_MODEL;
+static thread_local heap_t* _memory_thread_heap TLS_MODEL;
 #endif
 
 static inline heap_t*
@@ -1291,7 +1291,7 @@ _rpmalloc_span_initialize_new(heap_t* heap, heap_size_class_t* heap_size_class,
 
 	//Setup free list. Only initialize one system page worth of free blocks in list
 	void* block;
-	span->free_list_limit = free_list_partial_init(&heap_size_class->free_list, &block, 
+	span->free_list_limit = free_list_partial_init(&heap_size_class->free_list, &block,
 		span, pointer_offset(span, SPAN_HEADER_SIZE), size_class->block_count, size_class->block_size);
 	//Link span as partial if there remains blocks to be initialized as free list, or full if fully initialized
 	if (span->free_list_limit < span->block_count) {
@@ -1395,7 +1395,7 @@ _rpmalloc_global_cache_finalize(global_cache_t* cache) {
 
 static void
 _rpmalloc_global_cache_insert_spans(span_t** span, size_t span_count, size_t count) {
-	const size_t cache_limit = (span_count == 1) ? 
+	const size_t cache_limit = (span_count == 1) ?
 		GLOBAL_CACHE_MULTIPLIER * MAX_THREAD_SPAN_CACHE :
 		GLOBAL_CACHE_MULTIPLIER * (MAX_THREAD_SPAN_LARGE_CACHE - (span_count >> 1));
 
@@ -1420,7 +1420,7 @@ _rpmalloc_global_cache_insert_spans(span_t** span, size_t span_count, size_t cou
 	// Enable unlimited cache if huge pages, or we will leak since it is unlikely that an entire huge page
 	// will be unmapped, and we're unable to partially decommit a huge page
 	while ((_memory_page_size > _memory_span_size) && (insert_count < count)) {
-#endif 
+#endif
 		span_t* current_span = span[insert_count++];
 		current_span->next = cache->overflow;
 		cache->overflow = current_span;
@@ -2093,7 +2093,7 @@ _rpmalloc_allocate_from_heap_fallback(heap_t* heap, heap_size_class_t* heap_size
 		heap_size_class->free_list = span->free_list;
 		span->free_list = 0;
 	} else {
-		//If the span did not fully initialize free list, link up another page worth of blocks 
+		//If the span did not fully initialize free list, link up another page worth of blocks
 		void* block_start = pointer_offset(span, SPAN_HEADER_SIZE + ((size_t)span->free_list_limit * span->block_size));
 		span->free_list_limit += free_list_partial_init(&heap_size_class->free_list, &block,
 			(void*)((uintptr_t)block_start & ~(_memory_page_size - 1)), block_start,
@@ -2923,7 +2923,7 @@ rpmalloc_finalize(void) {
 		_memory_global_reserve_count = 0;
 		_memory_global_reserve = 0;
 	}
-	atomic_store32_release(&_memory_global_lock, 0); 
+	atomic_store32_release(&_memory_global_lock, 0);
 
 	//Free all thread caches and fully free spans
 	for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx) {
@@ -3423,7 +3423,7 @@ rpmalloc_heap_aligned_realloc(rpmalloc_heap_t* heap, void* ptr, size_t alignment
 		return 0;
 	}
 #endif
-	return _rpmalloc_aligned_reallocate(heap, ptr, alignment, size, 0, flags); 
+	return _rpmalloc_aligned_reallocate(heap, ptr, alignment, size, 0, flags);
 }
 
 extern inline void
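Note on the TracyClient.cpp hunks: Tracy compiles its vendored sources into a single translation unit, so their warnings surface in the including project's build, and a CI run with warnings-as-errors fails on them. The patch scopes the suppression with GCC's diagnostic push/pop, mirroring the MSVC warning(push, 0) block already present. A minimal self-contained sketch of that pattern follows; the included filename is a placeholder, not something from the patch.

// Sketch: scoped warning suppression around third-party code compiled
// into this translation unit. "vendored_impl.cpp" is a hypothetical name.
#if defined(_MSC_VER)
#  pragma warning(push, 0)           // MSVC: drop to warning level 0
#endif
#if defined(__GNUC__)                // GCC and Clang both define __GNUC__
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wunused-parameter"
#  pragma GCC diagnostic ignored "-Wmissing-field-initializers"
#endif

#include "vendored_impl.cpp"         // third-party source, unity-build style

#if defined(__GNUC__)
#  pragma GCC diagnostic pop         // restore the previous diagnostic state
#endif
#if defined(_MSC_VER)
#  pragma warning(pop)
#endif

Because the push/pop pair restores the prior diagnostic state, first-party code after the includes is still compiled with full warnings. The tracy_rpmalloc.cpp hunk swapping _Thread_local for the C++11 thread_local keyword is in the same spirit: _Thread_local is a C11 spelling that C++ builds only see through rpmalloc's macro fallbacks, so using the native keyword presumably avoids compiler complaints when the file is built as C++.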