From 1e5df2958d9fedd090d6d65fe8157c4115c898d0 Mon Sep 17 00:00:00 2001 From: Meir Blachman Date: Thu, 8 Aug 2024 20:11:22 +0300 Subject: [PATCH] chore: logger enforce "CA2254: Template should be a static expression" (#564) * chore: logger enforce "CA2254: Template should be a static expression" --- .editorconfig | 4 + benchmark/Resp.benchmark/RespOnlineBench.cs | 6 +- libs/client/Utility.cs | 2 +- .../Server/Migration/MigrationDriver.cs | 6 +- .../Server/Replication/ReplicationManager.cs | 4 +- libs/cluster/Session/MigrateCommand.cs | 2 +- libs/cluster/Session/ReplicaOfCommand.cs | 2 +- libs/common/Memory/LimitedFixedBufferPool.cs | 2 +- libs/common/Networking/NetworkHandler.cs | 4 +- libs/host/Configuration/ConfigProviders.cs | 7 +- libs/host/Configuration/Options.cs | 2 +- .../Redis/RedisConfigSerializer.cs | 4 +- libs/host/MemoryLogger.cs | 2 + libs/host/ServerSettingsManager.cs | 12 +-- libs/server/Auth/GarnetAadAuthenticator.cs | 2 +- libs/server/Metrics/GarnetServerMonitor.cs | 2 +- libs/server/Resp/BasicCommands.cs | 3 +- libs/server/Resp/RespCommandsInfoProvider.cs | 4 +- libs/server/Servers/GarnetServerOptions.cs | 71 +++++++-------- libs/server/Servers/ServerOptions.cs | 20 ++--- libs/server/StoreWrapper.cs | 8 +- libs/server/TLS/ServerCertificateSelector.cs | 2 +- .../core/Allocator/BlittableScanIterator.cs | 2 +- .../core/Allocator/GenericAllocatorImpl.cs | 6 +- .../src/core/Allocator/GenericScanIterator.cs | 2 +- .../src/core/Allocator/MallocFixedPageSize.cs | 4 +- .../core/Allocator/SpanByteScanIterator.cs | 2 +- .../src/core/Index/Common/LogSizeTracker.cs | 8 +- .../cs/src/core/Index/Recovery/DeltaLog.cs | 4 +- .../core/Index/Recovery/IndexCheckpoint.cs | 2 +- .../src/core/Index/Recovery/IndexRecovery.cs | 2 +- .../cs/src/core/Index/Recovery/Recovery.cs | 4 +- .../cs/src/core/TsavoriteLog/TsavoriteLog.cs | 8 +- .../core/TsavoriteLog/TsavoriteLogIterator.cs | 2 +- .../cs/src/core/Utilities/Utility.cs | 2 +- .../ClusterStress/ShardedRespOnlineBench.cs 
| 8 +- .../CommandInfoUpdater/CommandInfoUpdater.cs | 30 +++---- .../EmbeddedPerformanceTest.cs | 2 +- playground/SampleModule/SampleModule.cs | 2 +- .../ClusterMigrateTests.cs | 86 +++++++++---------- test/Garnet.test.cluster/ClusterTestUtils.cs | 2 +- 41 files changed, 175 insertions(+), 174 deletions(-) diff --git a/.editorconfig b/.editorconfig index d3f98655f3..1c0c2c2106 100644 --- a/.editorconfig +++ b/.editorconfig @@ -35,6 +35,10 @@ dotnet_diagnostic.CS1591.severity = suggestion # CA5359: Do not disable certificate validation dotnet_diagnostic.CA5359.severity = suggestion +# Usage rules +# CA2254: Template should be a static expression +dotnet_diagnostic.CA2254.severity = warning + # Miscellaneous style rules dotnet_separate_import_directive_groups = false file_header_template = Copyright (c) Microsoft Corporation.\nLicensed under the MIT license. diff --git a/benchmark/Resp.benchmark/RespOnlineBench.cs b/benchmark/Resp.benchmark/RespOnlineBench.cs index 5306893e65..84a751fe40 100644 --- a/benchmark/Resp.benchmark/RespOnlineBench.cs +++ b/benchmark/Resp.benchmark/RespOnlineBench.cs @@ -323,7 +323,7 @@ public void Run() $"{"total_ops;",pad}" + $"{"iter_tops;",pad}" + $"{"tpt (Kops/sec)",pad}"; - logger?.LogInformation(msg); + logger?.LogInformation("{msg}", msg); } } @@ -379,12 +379,12 @@ public void Run() $"{summary.TotalCount,pad}" + $"{curr_iter_ops,pad}" + $"{Math.Round(BatchSize * curr_iter_ops / elapsedSecs, 2),pad}"; - logger.Log(LogLevel.Information, histogramOutput); + logger.Log(LogLevel.Information, "{msg}", histogramOutput); } else { var histogramOutput = $"{0,pad}" + $"{0,pad}" + $"{0,pad}" + $"{0,pad}" + $"{0,pad}" + $"{0,pad}" + $"{0,pad}" + $"{0,pad}" + $"{0,pad}" + $"{0,pad}"; - logger.Log(LogLevel.Information, histogramOutput); + logger.Log(LogLevel.Information, "{msg}", histogramOutput); } } last_iter_ops = summary.TotalCount; diff --git a/libs/client/Utility.cs b/libs/client/Utility.cs index 7e9073376d..a8124b0315 100644 --- 
a/libs/client/Utility.cs +++ b/libs/client/Utility.cs @@ -55,7 +55,7 @@ internal static int NumBitsPreviousPowerOf2(long v, ILogger logger = null) { long adjustedSize = PreviousPowerOf2(v); if (v != adjustedSize) - logger?.LogInformation($"Warning: using lower value {adjustedSize} instead of specified value {v}"); + logger?.LogInformation("Warning: using lower value {adjustedSize} instead of specified value {specifiedValue}", adjustedSize, v); return (int)Math.Log(adjustedSize, 2); } diff --git a/libs/cluster/Server/Migration/MigrationDriver.cs b/libs/cluster/Server/Migration/MigrationDriver.cs index 1fea6d2bd9..c27c5b32c0 100644 --- a/libs/cluster/Server/Migration/MigrationDriver.cs +++ b/libs/cluster/Server/Migration/MigrationDriver.cs @@ -80,7 +80,7 @@ private void BeginAsyncMigrationTask() //3. Migrate actual data if (!MigrateSlotsDriver()) { - logger?.LogError($"MigrateSlotsDriver failed"); + logger?.LogError("MigrateSlotsDriver failed"); TryRecoverFromFailure(); Status = MigrateState.FAIL; return; @@ -91,7 +91,7 @@ private void BeginAsyncMigrationTask() //5. Clear local migration set. if (!RelinquishOwnership()) { - logger?.LogError($"Failed to relinquish ownerhsip to target node"); + logger?.LogError("Failed to relinquish ownership to target node"); TryRecoverFromFailure(); Status = MigrateState.FAIL; return; @@ -100,7 +100,7 @@ private void BeginAsyncMigrationTask() //6. Change ownership of slots to target node.
if (!TrySetSlotRanges(GetTargetNodeId, MigrateState.NODE)) { - logger?.LogError($"Failed to assign ownerhsip to target node"); + logger?.LogError("Failed to assign ownership to target node"); TryRecoverFromFailure(); Status = MigrateState.FAIL; return; diff --git a/libs/cluster/Server/Replication/ReplicationManager.cs b/libs/cluster/Server/Replication/ReplicationManager.cs index ab39e89c35..56c191b58a 100644 --- a/libs/cluster/Server/Replication/ReplicationManager.cs +++ b/libs/cluster/Server/Replication/ReplicationManager.cs @@ -202,7 +202,7 @@ public void Recover() // ReplicaRecover(); break; default: - logger?.LogError($"Not valid role for node {nodeRole}"); + logger?.LogError("Not valid role for node {nodeRole}", nodeRole); throw new Exception($"Not valid role for node {nodeRole}"); } } @@ -270,7 +270,7 @@ public void Start() if (clusterProvider.replicationManager.TryAddReplicationTask(replicaId, 0, out var aofSyncTaskInfo)) { if (!TryConnectToReplica(replicaId, 0, aofSyncTaskInfo, out var errorMessage)) - logger?.LogError($"{{errorMessage}}", Encoding.ASCII.GetString(errorMessage)); + logger?.LogError("{errorMessage}", Encoding.ASCII.GetString(errorMessage)); } } } diff --git a/libs/cluster/Session/MigrateCommand.cs b/libs/cluster/Session/MigrateCommand.cs index 9edcff0b35..dc5c53a8cd 100644 --- a/libs/cluster/Session/MigrateCommand.cs +++ b/libs/cluster/Session/MigrateCommand.cs @@ -184,7 +184,7 @@ private bool TryMIGRATE(int count, byte* ptr) // Add pointer of current parsed key if (!keys.TryAdd(new ArgSlice(keyPtr, ksize), KeyMigrationStatus.QUEUED)) { - logger?.LogWarning($"Failed to add {{key}}", Encoding.ASCII.GetString(keyPtr, ksize)); + logger?.LogWarning("Failed to add {key}", Encoding.ASCII.GetString(keyPtr, ksize)); pstate = MigrateCmdParseState.FAILEDTOADDKEY; continue; } diff --git a/libs/cluster/Session/ReplicaOfCommand.cs b/libs/cluster/Session/ReplicaOfCommand.cs index b5dc0192b2..a4654cabb9 100644 --- a/libs/cluster/Session/ReplicaOfCommand.cs
+++ b/libs/cluster/Session/ReplicaOfCommand.cs @@ -47,7 +47,7 @@ private bool TryREPLICAOF(int count, byte* ptr) { if (!int.TryParse(portStr, out var port)) { - logger?.LogWarning("TryREPLICAOF failed to parse port {port}", portStr); + logger?.LogWarning($"{nameof(TryREPLICAOF)} failed to parse port {{port}}", portStr); while (!RespWriteUtils.WriteError($"ERR REPLICAOF failed to parse port '{portStr}'", ref dcurr, dend)) SendAndReset(); return true; diff --git a/libs/common/Memory/LimitedFixedBufferPool.cs b/libs/common/Memory/LimitedFixedBufferPool.cs index e5c84f1d58..bdac9cab95 100644 --- a/libs/common/Memory/LimitedFixedBufferPool.cs +++ b/libs/common/Memory/LimitedFixedBufferPool.cs @@ -76,7 +76,7 @@ public unsafe PoolEntry Get(int size) if (Interlocked.Increment(ref totalAllocations) < 0) { Interlocked.Decrement(ref totalAllocations); - logger?.LogError($"Invalid Get on disposed pool"); + logger?.LogError("Invalid Get on disposed pool"); return null; } diff --git a/libs/common/Networking/NetworkHandler.cs b/libs/common/Networking/NetworkHandler.cs index 50d67169c3..c17cb5c198 100644 --- a/libs/common/Networking/NetworkHandler.cs +++ b/libs/common/Networking/NetworkHandler.cs @@ -569,7 +569,7 @@ public override void ReturnResponseObject() { } public override unsafe bool SendResponse(int offset, int size) { #if MESSAGETRAGE - logger?.LogInformation($"Sending response of size {size} bytes"); + logger?.LogInformation("Sending response of size {size} bytes", size); logger?.LogTrace("SEND: [{send}]", System.Text.Encoding.UTF8.GetString( new Span(transportSendBuffer).Slice(offset, size)).Replace("\n", "|").Replace("\r", "")); #endif @@ -582,7 +582,7 @@ public override unsafe bool SendResponse(int offset, int size) public override void SendResponse(byte[] buffer, int offset, int count, object context) { #if MESSAGETRAGE - logger?.LogInformation($"Sending response of size {count} bytes"); + logger?.LogInformation("Sending response of size {count} bytes", count); 
logger?.LogTrace("SEND: [{send}]", System.Text.Encoding.UTF8.GetString( new Span(buffer).Slice(offset, count)).Replace("\n", "|").Replace("\r", "")); #endif diff --git a/libs/host/Configuration/ConfigProviders.cs b/libs/host/Configuration/ConfigProviders.cs index f3adca4793..eeadccce60 100644 --- a/libs/host/Configuration/ConfigProviders.cs +++ b/libs/host/Configuration/ConfigProviders.cs @@ -91,8 +91,7 @@ public bool TryImportOptions(string path, IStreamProvider streamProvider, Option MissingMemberHandling = MissingMemberHandling.Error, Error = delegate (object _, ErrorEventArgs args) { - logger?.LogWarning( - $"Encountered an issue when deserializing config file (Path: {path}): {args.ErrorContext.Error.Message}"); + logger?.LogWarning("Encountered an issue when deserializing config file (Path: {path}): {ErrorMessage}", path, args.ErrorContext.Error.Message); args.ErrorContext.Handled = true; } }; @@ -100,7 +99,7 @@ public bool TryImportOptions(string path, IStreamProvider streamProvider, Option } catch (Newtonsoft.Json.JsonException je) { - logger?.LogError(je, $"An error occurred while parsing config file (Path: {path})."); + logger?.LogError(je, "An error occurred while parsing config file (Path: {path}).", path); return false; } @@ -116,7 +115,7 @@ public bool TryExportOptions(string path, IStreamProvider streamProvider, Option } catch (NotSupportedException e) { - logger?.LogError(e, $"An error occurred while serializing config file (Path: {path})."); + logger?.LogError(e, "An error occurred while serializing config file (Path: {path}).", path); return false; } diff --git a/libs/host/Configuration/Options.cs b/libs/host/Configuration/Options.cs index a6ed11048c..601608d77f 100644 --- a/libs/host/Configuration/Options.cs +++ b/libs/host/Configuration/Options.cs @@ -474,7 +474,7 @@ public bool IsValid(out List invalidOptions, ILogger logger) foreach (var validationResult in validationResults) { invalidOptions.AddRange(validationResult.MemberNames); - 
logger?.LogError(validationResult.ErrorMessage); + logger?.LogError("{errorMessage}", validationResult.ErrorMessage); } } diff --git a/libs/host/Configuration/Redis/RedisConfigSerializer.cs b/libs/host/Configuration/Redis/RedisConfigSerializer.cs index e1392b54fe..832299bbf4 100644 --- a/libs/host/Configuration/Redis/RedisConfigSerializer.cs +++ b/libs/host/Configuration/Redis/RedisConfigSerializer.cs @@ -81,7 +81,7 @@ public static RedisOptions Deserialize(StreamReader reader, ILogger logger) var key = line.Substring(0, sepIdx); if (!KeyToProperty.Value.ContainsKey(key)) { - logger?.LogWarning($"Redis configuration option not supported: {key}."); + logger?.LogWarning("Redis configuration option not supported: {key}.", key); continue; } @@ -136,7 +136,7 @@ public static RedisOptions Deserialize(StreamReader reader, ILogger logger) var redisOptionAttr = (RedisOptionAttribute)prop.GetCustomAttributes(typeof(RedisOptionAttribute), false).First(); if (!string.IsNullOrEmpty(redisOptionAttr.UsageWarning)) { - logger?.LogWarning($"Redis configuration option usage warning ({key}): {redisOptionAttr.UsageWarning}"); + logger?.LogWarning("Redis configuration option usage warning ({key}): {UsageWarning}", key, redisOptionAttr.UsageWarning); } } diff --git a/libs/host/MemoryLogger.cs b/libs/host/MemoryLogger.cs index ce721344f3..f5e4efbf73 100644 --- a/libs/host/MemoryLogger.cs +++ b/libs/host/MemoryLogger.cs @@ -38,7 +38,9 @@ public void FlushLogger(ILogger dstLogger) { foreach (var entry in this._memoryLog) { +#pragma warning disable CA2254 // Template should be a static expression dstLogger.Log(entry.Item1, entry.Item2, entry.Item3); +#pragma warning restore CA2254 // Template should be a static expression } this._memoryLog.Clear(); } diff --git a/libs/host/ServerSettingsManager.cs b/libs/host/ServerSettingsManager.cs index d31717679d..25747c7de7 100644 --- a/libs/host/ServerSettingsManager.cs +++ b/libs/host/ServerSettingsManager.cs @@ -86,8 +86,8 @@ internal static bool 
TryParseCommandLineArguments(string[] args, out Options opt // If any unparsed arguments remain, display a warning to the user if (unparsedArguments.Count > 0) { - logger?.LogWarning(@$"The following command line arguments were not parsed: {string.Join(',', unparsedArguments)}. -Please check the syntax of your command. For detailed usage information run with --help."); + logger?.LogWarning(@"The following command line arguments were not parsed: {unparsedArguments}. +Please check the syntax of your command. For detailed usage information run with --help.", string.Join(',', unparsedArguments)); } } @@ -254,8 +254,8 @@ private static bool TryImportServerOptions(string path, ConfigFileType configFil _ => throw new NotImplementedException() }; - logger?.Log(importSucceeded ? LogLevel.Information : LogLevel.Error, - $"Configuration import from {fileLocation} {(importSucceeded ? "succeeded" : "failed")}. Path: {path}."); + logger?.Log(importSucceeded ? LogLevel.Information : LogLevel.Error, "Configuration import from {fileLocation} {importSucceeded}. Path: {path}.", + fileLocation, importSucceeded ? "succeeded" : "failed", path); return importSucceeded; } @@ -287,8 +287,8 @@ private static bool TryExportServerOptions(string path, ConfigFileType configFil _ => throw new NotImplementedException() }; - logger?.Log(exportSucceeded ? LogLevel.Information : LogLevel.Error, - $"Configuration export to {fileLocation} {(exportSucceeded ? "succeeded" : "failed")}. File path: {path}."); + logger?.Log(exportSucceeded ? LogLevel.Information : LogLevel.Error, "Configuration export to {fileLocation} {exportSucceeded}. File path: {path}.", + fileLocation, exportSucceeded ? 
"succeeded" : "failed", path); return exportSucceeded; } diff --git a/libs/server/Auth/GarnetAadAuthenticator.cs b/libs/server/Auth/GarnetAadAuthenticator.cs index 0e518bbb9f..8ee603c005 100644 --- a/libs/server/Auth/GarnetAadAuthenticator.cs +++ b/libs/server/Auth/GarnetAadAuthenticator.cs @@ -75,7 +75,7 @@ public bool Authenticate(ReadOnlySpan password, ReadOnlySpan usernam _validateTo = token.ValidTo; _authorized = IsIdentityAuthorized(identity, username); - _logger?.LogInformation($"Authentication successful. Token valid from {_validFrom} to {_validateTo}"); + _logger?.LogInformation("Authentication successful. Token valid from {validFrom} to {validateTo}", _validFrom, _validateTo); return IsAuthorized(); } diff --git a/libs/server/Metrics/GarnetServerMonitor.cs b/libs/server/Metrics/GarnetServerMonitor.cs index ed3cada17d..c12469d77d 100644 --- a/libs/server/Metrics/GarnetServerMonitor.cs +++ b/libs/server/Metrics/GarnetServerMonitor.cs @@ -213,7 +213,7 @@ private void ResetLatencyMetrics() { if (resetLatencyMetrics[eventType]) { - logger?.LogInformation($"Resetting server-side stats {eventType}"); + logger?.LogInformation("Resetting server-side stats {eventType}", eventType); var sessions = ((GarnetServerBase)server).ActiveConsumers(); foreach (var entry in sessions) diff --git a/libs/server/Resp/BasicCommands.cs b/libs/server/Resp/BasicCommands.cs index 7dda4b2a06..ceacb52210 100644 --- a/libs/server/Resp/BasicCommands.cs +++ b/libs/server/Resp/BasicCommands.cs @@ -1508,8 +1508,7 @@ void FlushDb(RespCommand cmd) else ExecuteFlushDb(unsafeTruncateLog); - logger?.LogInformation( - $"Running {nameof(cmd)} {(async ? "async" : "sync")} {(unsafeTruncateLog ? " with unsafetruncatelog." : string.Empty)}"); + logger?.LogInformation($"Running {nameof(cmd)} {{async}} {{mode}}", async ? "async" : "sync", unsafeTruncateLog ? " with unsafetruncatelog." 
: string.Empty); while (!RespWriteUtils.WriteDirect(CmdStrings.RESP_OK, ref dcurr, dend)) SendAndReset(); } diff --git a/libs/server/Resp/RespCommandsInfoProvider.cs b/libs/server/Resp/RespCommandsInfoProvider.cs index 8aa8849a55..8040d9ba36 100644 --- a/libs/server/Resp/RespCommandsInfoProvider.cs +++ b/libs/server/Resp/RespCommandsInfoProvider.cs @@ -107,7 +107,7 @@ public bool TryImportRespCommandsInfo(string path, IStreamProvider streamProvide } catch (JsonException je) { - logger?.LogError(je, $"An error occurred while parsing resp commands info file (Path: {path})."); + logger?.LogError(je, "An error occurred while parsing resp commands info file (Path: {path}).", path); return false; } @@ -125,7 +125,7 @@ public bool TryExportRespCommandsInfo(string path, IStreamProvider streamProvide } catch (NotSupportedException e) { - logger?.LogError(e, $"An error occurred while serializing resp commands info file (Path: {path})."); + logger?.LogError(e, "An error occurred while serializing resp commands info file (Path: {path}).", path); return false; } diff --git a/libs/server/Servers/GarnetServerOptions.cs b/libs/server/Servers/GarnetServerOptions.cs index bd937261e5..c61f472625 100644 --- a/libs/server/Servers/GarnetServerOptions.cs +++ b/libs/server/Servers/GarnetServerOptions.cs @@ -373,24 +373,25 @@ public KVSettings GetSettings(ILogger logger, out INamedDevi MutableFraction = MutablePercent / 100.0, PageSize = 1L << PageSizeBits() }; - logger?.LogInformation($"[Store] Using page size of {PrettySize(kvSettings.PageSize)}"); + logger?.LogInformation("[Store] Using page size of {PageSize}", PrettySize(kvSettings.PageSize)); kvSettings.MemorySize = 1L << MemorySizeBits(MemorySize, PageSize, out var storeEmptyPageCount); kvSettings.MinEmptyPageCount = storeEmptyPageCount; long effectiveSize = kvSettings.MemorySize - storeEmptyPageCount * kvSettings.MemorySize; if (storeEmptyPageCount == 0) - logger?.LogInformation($"[Store] Using log memory size of 
{PrettySize(kvSettings.MemorySize)}"); + logger?.LogInformation("[Store] Using log memory size of {MemorySize}", PrettySize(kvSettings.MemorySize)); else - logger?.LogInformation($"[Store] Using log memory size of {PrettySize(kvSettings.MemorySize)}, with {storeEmptyPageCount} empty pages, for effective size of {PrettySize(effectiveSize)}"); + logger?.LogInformation("[Store] Using log memory size of {MemorySize}, with {storeEmptyPageCount} empty pages, for effective size of {effectiveSize}", + PrettySize(kvSettings.MemorySize), storeEmptyPageCount, PrettySize(effectiveSize)); - logger?.LogInformation($"[Store] There are {PrettySize(kvSettings.MemorySize / kvSettings.PageSize)} log pages in memory"); + logger?.LogInformation("[Store] There are {LogPages} log pages in memory", PrettySize(kvSettings.MemorySize / kvSettings.PageSize)); kvSettings.SegmentSize = 1L << SegmentSizeBits(); - logger?.LogInformation($"[Store] Using disk segment size of {PrettySize(kvSettings.SegmentSize)}"); + logger?.LogInformation("[Store] Using disk segment size of {SegmentSize}", PrettySize(kvSettings.SegmentSize)); - logger?.LogInformation($"[Store] Using hash index size of {PrettySize(kvSettings.IndexSize)} ({PrettySize(indexCacheLines)} cache lines)"); - logger?.LogInformation($"[Store] Hash index size is optimized for up to ~{PrettySize(indexCacheLines * 4L)} distinct keys"); + logger?.LogInformation("[Store] Using hash index size of {IndexSize} ({indexCacheLines} cache lines)", PrettySize(kvSettings.IndexSize), PrettySize(indexCacheLines)); + logger?.LogInformation("[Store] Hash index size is optimized for up to ~{distinctKeys} distinct keys", PrettySize(indexCacheLines * 4L)); AdjustedIndexMaxCacheLines = IndexMaxSize == string.Empty ? 
0 : IndexSizeCachelines("hash index max size", IndexMaxSize); if (AdjustedIndexMaxCacheLines != 0 && AdjustedIndexMaxCacheLines < indexCacheLines) @@ -398,10 +399,10 @@ public KVSettings GetSettings(ILogger logger, out INamedDevi if (AdjustedIndexMaxCacheLines > 0) { - logger?.LogInformation($"[Store] Using hash index max size of {PrettySize(AdjustedIndexMaxCacheLines * 64L)}, ({PrettySize(AdjustedIndexMaxCacheLines)} cache lines)"); - logger?.LogInformation($"[Store] Hash index max size is optimized for up to ~{PrettySize(AdjustedIndexMaxCacheLines * 4L)} distinct keys"); + logger?.LogInformation("[Store] Using hash index max size of {MaxSize}, ({CacheLines} cache lines)", PrettySize(AdjustedIndexMaxCacheLines * 64L), PrettySize(AdjustedIndexMaxCacheLines)); + logger?.LogInformation("[Store] Hash index max size is optimized for up to ~{distinctKeys} distinct keys", PrettySize(AdjustedIndexMaxCacheLines * 4L)); } - logger?.LogInformation($"[Store] Using log mutable percentage of {MutablePercent}%"); + logger?.LogInformation("[Store] Using log mutable percentage of {MutablePercent}%", MutablePercent); DeviceFactoryCreator ??= () => new LocalStorageNamedDeviceFactory(useNativeDeviceLinux: UseNativeDeviceLinux, logger: logger); @@ -428,19 +429,19 @@ public KVSettings GetSettings(ILogger logger, out INamedDevi if (RevivInChainOnly) { - logger?.LogInformation($"[Store] Using Revivification in-chain only"); + logger?.LogInformation("[Store] Using Revivification in-chain only"); kvSettings.RevivificationSettings = RevivificationSettings.InChainOnly.Clone(); } else if (UseRevivBinsPowerOf2) { - logger?.LogInformation($"[Store] Using Revivification with power-of-2 bins"); + logger?.LogInformation("[Store] Using Revivification with power-of-2 bins"); kvSettings.RevivificationSettings = RevivificationSettings.PowerOf2Bins.Clone(); kvSettings.RevivificationSettings.NumberOfBinsToSearch = RevivNumberOfBinsToSearch; kvSettings.RevivificationSettings.RevivifiableFraction = 
RevivifiableFraction; } else if (RevivBinRecordSizes?.Length > 0) { - logger?.LogInformation($"[Store] Using Revivification with custom bins"); + logger?.LogInformation("[Store] Using Revivification with custom bins"); // We use this in the RevivBinRecordCounts and RevivObjBinRecordCount Options help text, so assert it here because we can't use an interpolated string there. System.Diagnostics.Debug.Assert(RevivificationBin.DefaultRecordsPerBin == 256); @@ -468,7 +469,7 @@ public KVSettings GetSettings(ILogger logger, out INamedDevi } else { - logger?.LogInformation($"[Store] Not using Revivification"); + logger?.LogInformation("[Store] Not using Revivification"); } return kvSettings; @@ -511,27 +512,27 @@ public KVSettings GetObjectStoreSettings(ILogger logger, MutableFraction = ObjectStoreMutablePercent / 100.0, PageSize = 1L << ObjectStorePageSizeBits() }; - logger?.LogInformation($"[Object Store] Using page size of {PrettySize(kvSettings.PageSize)}"); - logger?.LogInformation($"[Object Store] Each page can hold ~{kvSettings.PageSize / 24} key-value pairs of objects"); + logger?.LogInformation("[Object Store] Using page size of {PageSize}", PrettySize(kvSettings.PageSize)); + logger?.LogInformation("[Object Store] Each page can hold ~{PageSize} key-value pairs of objects", kvSettings.PageSize / 24); kvSettings.MemorySize = 1L << MemorySizeBits(ObjectStoreLogMemorySize, ObjectStorePageSize, out var objectStoreEmptyPageCount); kvSettings.MinEmptyPageCount = objectStoreEmptyPageCount; long effectiveSize = kvSettings.MemorySize - objectStoreEmptyPageCount * kvSettings.PageSize; if (objectStoreEmptyPageCount == 0) - logger?.LogInformation($"[Object Store] Using log memory size of {PrettySize(kvSettings.MemorySize)}"); + logger?.LogInformation("[Object Store] Using log memory size of {MemorySize}", PrettySize(kvSettings.MemorySize)); else - logger?.LogInformation($"[Object Store] Using log memory size of {PrettySize(kvSettings.MemorySize)}, with 
{objectStoreEmptyPageCount} empty pages, for effective size of {PrettySize(effectiveSize)}"); + logger?.LogInformation("[Object Store] Using log memory size of {MemorySize}, with {objectStoreEmptyPageCount} empty pages, for effective size of {effectiveSize}", PrettySize(kvSettings.MemorySize), objectStoreEmptyPageCount, PrettySize(effectiveSize)); - logger?.LogInformation($"[Object Store] This can hold ~{effectiveSize / 24} key-value pairs of objects in memory total"); + logger?.LogInformation("[Object Store] This can hold ~{PageSize} key-value pairs of objects in memory total", effectiveSize / 24); - logger?.LogInformation($"[Object Store] There are {PrettySize(kvSettings.MemorySize / kvSettings.PageSize)} log pages in memory"); + logger?.LogInformation("[Object Store] There are {LogPages} log pages in memory", PrettySize(kvSettings.MemorySize / kvSettings.PageSize)); kvSettings.SegmentSize = 1L << ObjectStoreSegmentSizeBits(); - logger?.LogInformation($"[Object Store] Using disk segment size of {PrettySize(kvSettings.SegmentSize)}"); + logger?.LogInformation("[Object Store] Using disk segment size of {SegmentSize}", PrettySize(kvSettings.SegmentSize)); - logger?.LogInformation($"[Object Store] Using hash index size of {PrettySize(kvSettings.IndexSize)} ({PrettySize(indexCacheLines)} cache lines)"); - logger?.LogInformation($"[Object Store] Hash index size is optimized for up to ~{PrettySize(indexCacheLines * 4L)} distinct keys"); + logger?.LogInformation("[Object Store] Using hash index size of {IndexSize} ({indexCacheLines} cache lines)", PrettySize(kvSettings.IndexSize), PrettySize(indexCacheLines)); + logger?.LogInformation("[Object Store] Hash index size is optimized for up to ~{distinctKeys} distinct keys", PrettySize(indexCacheLines * 4L)); AdjustedObjectStoreIndexMaxCacheLines = ObjectStoreIndexMaxSize == string.Empty ? 
0 : IndexSizeCachelines("hash index max size", ObjectStoreIndexMaxSize); if (AdjustedObjectStoreIndexMaxCacheLines != 0 && AdjustedObjectStoreIndexMaxCacheLines < indexCacheLines) @@ -539,13 +540,13 @@ public KVSettings GetObjectStoreSettings(ILogger logger, if (AdjustedObjectStoreIndexMaxCacheLines > 0) { - logger?.LogInformation($"[Object Store] Using hash index max size of {PrettySize(AdjustedObjectStoreIndexMaxCacheLines * 64L)}, ({PrettySize(AdjustedObjectStoreIndexMaxCacheLines)} cache lines)"); - logger?.LogInformation($"[Object Store] Hash index max size is optimized for up to ~{PrettySize(AdjustedObjectStoreIndexMaxCacheLines * 4L)} distinct keys"); + logger?.LogInformation("[Object Store] Using hash index max size of {MaxSize}, ({CacheLines} cache lines)", PrettySize(AdjustedObjectStoreIndexMaxCacheLines * 64L), PrettySize(AdjustedObjectStoreIndexMaxCacheLines)); + logger?.LogInformation("[Object Store] Hash index max size is optimized for up to ~{distinctKeys} distinct keys", PrettySize(AdjustedObjectStoreIndexMaxCacheLines * 4L)); } - logger?.LogInformation($"[Object Store] Using log mutable percentage of {ObjectStoreMutablePercent}%"); + logger?.LogInformation("[Object Store] Using log mutable percentage of {ObjectStoreMutablePercent}%", ObjectStoreMutablePercent); objTotalMemorySize = ParseSize(ObjectStoreTotalMemorySize); - logger?.LogInformation($"[Object Store] Total memory size including heap objects is {(objTotalMemorySize > 0 ? PrettySize(objTotalMemorySize) : "unlimited")}"); + logger?.LogInformation("[Object Store] Total memory size including heap objects is {totalMemorySize}", (objTotalMemorySize > 0 ? 
PrettySize(objTotalMemorySize) : "unlimited")); if (EnableStorageTier) { @@ -566,12 +567,12 @@ public KVSettings GetObjectStoreSettings(ILogger logger, if (RevivInChainOnly) { - logger?.LogInformation($"[Object Store] Using Revivification in-chain only"); + logger?.LogInformation("[Object Store] Using Revivification in-chain only"); kvSettings.RevivificationSettings = RevivificationSettings.InChainOnly.Clone(); } else if (UseRevivBinsPowerOf2 || RevivBinRecordSizes?.Length > 0) { - logger?.LogInformation($"[Object Store] Using Revivification with a single fixed-size bin"); + logger?.LogInformation("[Object Store] Using Revivification with a single fixed-size bin"); kvSettings.RevivificationSettings = RevivificationSettings.DefaultFixedLength.Clone(); kvSettings.RevivificationSettings.RevivifiableFraction = RevivifiableFraction; kvSettings.RevivificationSettings.FreeRecordBins[0].NumberOfRecords = RevivObjBinRecordCount; @@ -579,7 +580,7 @@ public KVSettings GetObjectStoreSettings(ILogger logger, } else { - logger?.LogInformation($"[Object Store] Not using Revivification"); + logger?.LogInformation("[Object Store] Not using Revivification"); } return kvSettings; @@ -635,7 +636,7 @@ public int AofMemorySizeBits() long size = ParseSize(AofMemorySize); long adjustedSize = PreviousPowerOf2(size); if (size != adjustedSize) - logger?.LogInformation($"Warning: using lower AOF memory size than specified (power of 2)"); + logger?.LogInformation("Warning: using lower AOF memory size than specified (power of 2)"); return (int)Math.Log(adjustedSize, 2); } @@ -648,7 +649,7 @@ public int AofPageSizeBits() long size = ParseSize(AofPageSize); long adjustedSize = PreviousPowerOf2(size); if (size != adjustedSize) - logger?.LogInformation($"Warning: using lower AOF page size than specified (power of 2)"); + logger?.LogInformation("Warning: using lower AOF page size than specified (power of 2)"); return (int)Math.Log(adjustedSize, 2); } @@ -661,7 +662,7 @@ public int 
AofSizeLimitSizeBits() long size = ParseSize(AofSizeLimit); long adjustedSize = PreviousPowerOf2(size); if (size != adjustedSize) - logger?.LogInformation($"Warning: using lower AOF memory size than specified (power of 2)"); + logger?.LogInformation("Warning: using lower AOF memory size than specified (power of 2)"); return (int)Math.Log(adjustedSize, 2); } @@ -674,7 +675,7 @@ public int ObjectStorePageSizeBits() long size = ParseSize(ObjectStorePageSize); long adjustedSize = PreviousPowerOf2(size); if (size != adjustedSize) - logger?.LogInformation($"Warning: using lower object store page size than specified (power of 2)"); + logger?.LogInformation("Warning: using lower object store page size than specified (power of 2)"); return (int)Math.Log(adjustedSize, 2); } @@ -687,7 +688,7 @@ public int ObjectStoreSegmentSizeBits() long size = ParseSize(ObjectStoreSegmentSize); long adjustedSize = PreviousPowerOf2(size); if (size != adjustedSize) - logger?.LogInformation($"Warning: using lower object store disk segment size than specified (power of 2)"); + logger?.LogInformation("Warning: using lower object store disk segment size than specified (power of 2)"); return (int)Math.Log(adjustedSize, 2); } diff --git a/libs/server/Servers/ServerOptions.cs b/libs/server/Servers/ServerOptions.cs index 91e0212c98..6c117175b6 100644 --- a/libs/server/Servers/ServerOptions.cs +++ b/libs/server/Servers/ServerOptions.cs @@ -115,7 +115,7 @@ public int MemorySizeBits() long size = ParseSize(MemorySize); long adjustedSize = PreviousPowerOf2(size); if (size != adjustedSize) - logger?.LogInformation($"Warning: using lower log memory size than specified (power of 2)"); + logger?.LogInformation("Warning: using lower log memory size than specified (power of 2)"); return (int)Math.Log(adjustedSize, 2); } @@ -128,7 +128,7 @@ public int PageSizeBits() long size = ParseSize(PageSize); long adjustedSize = PreviousPowerOf2(size); if (size != adjustedSize) - logger?.LogInformation($"Warning: using 
lower page size than specified (power of 2)"); + logger?.LogInformation("Warning: using lower page size than specified (power of 2)"); return (int)Math.Log(adjustedSize, 2); } @@ -141,7 +141,7 @@ public long PubSubPageSizeBytes() long size = ParseSize(PubSubPageSize); long adjustedSize = PreviousPowerOf2(size); if (size != adjustedSize) - logger?.LogInformation($"Warning: using lower pub/sub page size than specified (power of 2)"); + logger?.LogInformation("Warning: using lower pub/sub page size than specified (power of 2)"); return adjustedSize; } @@ -154,7 +154,7 @@ public int SegmentSizeBits() long size = ParseSize(SegmentSize); long adjustedSize = PreviousPowerOf2(size); if (size != adjustedSize) - logger?.LogInformation($"Warning: using lower disk segment size than specified (power of 2)"); + logger?.LogInformation("Warning: using lower disk segment size than specified (power of 2)"); return (int)Math.Log(adjustedSize, 2); } @@ -168,7 +168,7 @@ public int IndexSizeCachelines(string name, string indexSize) long adjustedSize = PreviousPowerOf2(size); if (adjustedSize < 64 || adjustedSize > (1L << 37)) throw new Exception($"Invalid {name}"); if (size != adjustedSize) - logger?.LogInformation($"Warning: using lower {name} than specified (power of 2)"); + logger?.LogInformation("Warning: using lower {name} than specified (power of 2)", name); return (int)(adjustedSize / 64); } @@ -184,17 +184,17 @@ public void GetSettings() PreallocateLog = false, PageSize = 1L << PageSizeBits() }; - logger?.LogInformation($"[Store] Using page size of {PrettySize(kvSettings.PageSize)}"); + logger?.LogInformation("[Store] Using page size of {PageSize}", PrettySize(kvSettings.PageSize)); kvSettings.MemorySize = 1L << MemorySizeBits(); - logger?.LogInformation($"[Store] Using log memory size of {PrettySize(kvSettings.MemorySize)}"); + logger?.LogInformation("[Store] Using log memory size of {MemorySize}", PrettySize(kvSettings.MemorySize)); - logger?.LogInformation($"[Store] There are 
{PrettySize(kvSettings.MemorySize / kvSettings.PageSize)} log pages in memory"); + logger?.LogInformation("[Store] There are {LogPages} log pages in memory", PrettySize(kvSettings.MemorySize / kvSettings.PageSize)); kvSettings.SegmentSize = 1L << SegmentSizeBits(); - logger?.LogInformation($"[Store] Using disk segment size of {PrettySize(kvSettings.SegmentSize)}"); + logger?.LogInformation("[Store] Using disk segment size of {SegmentSize}", PrettySize(kvSettings.SegmentSize)); - logger?.LogInformation($"[Store] Using hash index size of {PrettySize(kvSettings.IndexSize)} ({PrettySize(indexCacheLines)} cache lines)"); + logger?.LogInformation("[Store] Using hash index size of {IndexSize} ({CacheLines} cache lines)", PrettySize(kvSettings.IndexSize), PrettySize(indexCacheLines)); if (EnableStorageTier) { diff --git a/libs/server/StoreWrapper.cs b/libs/server/StoreWrapper.cs index a0e67899b6..339effe837 100644 --- a/libs/server/StoreWrapper.cs +++ b/libs/server/StoreWrapper.cs @@ -550,7 +550,7 @@ private async void IndexAutoGrowTask(CancellationToken token) } catch (Exception ex) { - logger?.LogError(ex, "IndexAutoGrowTask exception received"); + logger?.LogError(ex, $"{nameof(IndexAutoGrowTask)} exception received"); } } @@ -567,18 +567,18 @@ private async void IndexAutoGrowTask(CancellationToken token) /// True if index has reached its max size private bool GrowIndexIfNeeded(StoreType storeType, long indexMaxSize, long overflowCount, Func indexSizeRetriever, Action growAction) { - logger?.LogDebug($"IndexAutoGrowTask[{storeType}]: checking index size {indexSizeRetriever()} against max {indexMaxSize} with overflow {overflowCount}"); + logger?.LogDebug($"{nameof(IndexAutoGrowTask)}[{{storeType}}]: checking index size {{indexSizeRetriever}} against max {{indexMaxSize}} with overflow {{overflowCount}}", storeType, indexSizeRetriever(), indexMaxSize, overflowCount); if (indexSizeRetriever() < indexMaxSize && overflowCount > (indexSizeRetriever() * 
serverOptions.IndexResizeThreshold / 100)) { - logger?.LogInformation($"IndexAutoGrowTask[{storeType}]: overflowCount {overflowCount} ratio more than threshold {serverOptions.IndexResizeThreshold}%. Doubling index size..."); + logger?.LogInformation($"{nameof(IndexAutoGrowTask)}[{{storeType}}]: overflowCount {{overflowCount}} ratio more than threshold {{indexResizeThreshold}}%. Doubling index size...", storeType, overflowCount, serverOptions.IndexResizeThreshold); growAction(); } if (indexSizeRetriever() < indexMaxSize) return false; - logger?.LogDebug($"IndexAutoGrowTask[{storeType}]: index size {indexSizeRetriever()} reached index max size {indexMaxSize}"); + logger?.LogDebug($"{nameof(IndexAutoGrowTask)}[{{storeType}}]: index size {{indexSizeRetriever}} reached index max size {{indexMaxSize}}", storeType, indexSizeRetriever(), indexMaxSize); return true; } diff --git a/libs/server/TLS/ServerCertificateSelector.cs b/libs/server/TLS/ServerCertificateSelector.cs index 8076fe71b5..ddf67f352a 100644 --- a/libs/server/TLS/ServerCertificateSelector.cs +++ b/libs/server/TLS/ServerCertificateSelector.cs @@ -123,7 +123,7 @@ void GetServerCertificate(object _) { if (certRefreshFrequency > TimeSpan.Zero) { - this._logger?.LogError(ex, $"Unable to fetch certificate. It will be retried after {certificateRefreshRetryInterval}"); + this._logger?.LogError(ex, "Unable to fetch certificate. 
It will be retried after {certificateRefreshRetryInterval}", certificateRefreshRetryInterval); try { _refreshTimer?.Change(certificateRefreshRetryInterval, certRefreshFrequency); diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableScanIterator.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableScanIterator.cs index bcd4264a6d..d596189f64 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableScanIterator.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableScanIterator.cs @@ -264,7 +264,7 @@ private unsafe void AsyncReadPagesCallback(uint errorCode, uint numBytes, object if (errorCode != 0) { - logger?.LogError($"AsyncReadPagesCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncReadPagesCallback)} error: {{errorCode}}", errorCode); result.cts?.Cancel(); } diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/GenericAllocatorImpl.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/GenericAllocatorImpl.cs index 55a4ef7a6b..2469c3a067 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/GenericAllocatorImpl.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/GenericAllocatorImpl.cs @@ -547,7 +547,7 @@ private void WriteAsync(long flushPage, ulong alignedDestinationAddres private void AsyncReadPageCallback(uint errorCode, uint numBytes, object context) { if (errorCode != 0) - logger?.LogError($"AsyncReadPageCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncReadPageCallback)} error: {{errorCode}}", errorCode); // Set the page status to flushed var result = (PageAsyncReadResult)context; @@ -591,7 +591,7 @@ protected override void ReadAsync( private void AsyncFlushPartialObjectLogCallback(uint errorCode, uint numBytes, object context) { if (errorCode != 0) - logger?.LogError($"AsyncFlushPartialObjectLogCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncFlushPartialObjectLogCallback)} error: {{errorCode}}", errorCode); // Set the page status to flushed var result = 
(PageAsyncFlushResult)context; @@ -601,7 +601,7 @@ private void AsyncFlushPartialObjectLogCallback(uint errorCode, uint n private void AsyncReadPageWithObjectsCallback(uint errorCode, uint numBytes, object context) { if (errorCode != 0) - logger?.LogError($"AsyncReadPageWithObjectsCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncReadPageWithObjectsCallback)} error: {{errorCode}}", errorCode); var result = (PageAsyncReadResult)context; diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/GenericScanIterator.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/GenericScanIterator.cs index a825370da5..68d579fad2 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/GenericScanIterator.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/GenericScanIterator.cs @@ -274,7 +274,7 @@ private unsafe void AsyncReadPagesCallback(uint errorCode, uint numBytes, object if (errorCode != 0) { - logger?.LogError($"AsyncReadPagesCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncReadPagesCallback)} error: {{errorCode}}", errorCode); result.cts?.Cancel(); } diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/MallocFixedPageSize.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/MallocFixedPageSize.cs index 29ffa92cba..b2d87eb6f5 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/MallocFixedPageSize.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/MallocFixedPageSize.cs @@ -383,7 +383,7 @@ private unsafe void AsyncFlushCallback(uint errorCode, uint numBytes, object con { if (errorCode != 0) { - logger?.LogError($"AsyncFlushCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncFlushCallback)} error: {{errorCode}}", errorCode); } var mem = ((OverflowPagesFlushAsyncResult)context).mem; @@ -492,7 +492,7 @@ private unsafe void AsyncPageReadCallback(uint errorCode, uint numBytes, object { if (errorCode != 0) { - logger?.LogError($"AsyncPageReadCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncPageReadCallback)} 
error: {{errorCode}}", errorCode); } recoveryCountdown.Decrement(); } diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteScanIterator.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteScanIterator.cs index 5dd6480994..3f3cb086ba 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteScanIterator.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteScanIterator.cs @@ -300,7 +300,7 @@ private unsafe void AsyncReadPagesCallback(uint errorCode, uint numBytes, object if (errorCode != 0) { - logger?.LogError($"AsyncReadPagesCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncReadPagesCallback)} error: {{errorCode}}", errorCode); result.cts?.Cancel(); } diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Common/LogSizeTracker.cs b/libs/storage/Tsavorite/cs/src/core/Index/Common/LogSizeTracker.cs index 175d3ecea1..764b3729ba 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Common/LogSizeTracker.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Common/LogSizeTracker.cs @@ -200,7 +200,7 @@ private void ResizeIfNeeded(CancellationToken token) // Include memory size from the log (logAccessor.MemorySizeBytes) + heap size (logSize.Total) to check utilization if (logSize.Total + logAccessor.MemorySizeBytes > highTargetSize) { - logger?.LogDebug($"Heap size {logSize.Total} + log {logAccessor.MemorySizeBytes} > target {highTargetSize}. Alloc: {logAccessor.AllocatedPageCount} EPC: {logAccessor.EmptyPageCount}"); + logger?.LogDebug("Heap size {totalLogSize} + log {MemorySizeBytes} > target {highTargetSize}. 
Alloc: {AllocatedPageCount} EPC: {EmptyPageCount}", logSize.Total, logAccessor.MemorySizeBytes, highTargetSize, logAccessor.AllocatedPageCount, logAccessor.EmptyPageCount); while (logSize.Total + logAccessor.MemorySizeBytes > highTargetSize && logAccessor.EmptyPageCount < logAccessor.MaxEmptyPageCount) { @@ -213,12 +213,12 @@ private void ResizeIfNeeded(CancellationToken token) logAccessor.EmptyPageCount++; PostEmptyPageCountIncrease(logAccessor.EmptyPageCount); - logger?.LogDebug($"Increasing empty page count to {logAccessor.EmptyPageCount}"); + logger?.LogDebug("Increasing empty page count to {EmptyPageCount}", logAccessor.EmptyPageCount); } } else if (logSize.Total + logAccessor.MemorySizeBytes < lowTargetSize) { - logger?.LogDebug($"Heap size {logSize.Total} + log {logAccessor.MemorySizeBytes} < target {lowTargetSize}. Alloc: {logAccessor.AllocatedPageCount} EPC: {logAccessor.EmptyPageCount}"); + logger?.LogDebug("Heap size {totalLogSize} + log {MemorySizeBytes} < target {lowTargetSize}. 
Alloc: {AllocatedPageCount} EPC: {EmptyPageCount}", logSize.Total, logAccessor.MemorySizeBytes, lowTargetSize, logAccessor.AllocatedPageCount, logAccessor.EmptyPageCount); while (logSize.Total + logAccessor.MemorySizeBytes < lowTargetSize && logAccessor.EmptyPageCount > logAccessor.MinEmptyPageCount) { @@ -231,7 +231,7 @@ private void ResizeIfNeeded(CancellationToken token) logAccessor.EmptyPageCount--; PostEmptyPageCountDecrease(logAccessor.EmptyPageCount); - logger?.LogDebug($"Decreasing empty page count to {logAccessor.EmptyPageCount}"); + logger?.LogDebug("Decreasing empty page count to {EmptyPageCount}", logAccessor.EmptyPageCount); } } } diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/DeltaLog.cs b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/DeltaLog.cs index 79cbd313d0..9890d791ad 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/DeltaLog.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/DeltaLog.cs @@ -168,7 +168,7 @@ private unsafe void AsyncReadPagesCallback(uint errorCode, uint numBytes, object if (errorCode != 0) { - logger?.LogError($"AsyncReadPagesCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncReadPagesCallback)} error: {{errorCode}}", errorCode); result.cts?.Cancel(); } Debug.Assert(result.freeBuffer1 == null); @@ -400,7 +400,7 @@ private void AsyncFlushPageToDeviceCallback(uint errorCode, uint numBytes, objec { if (errorCode != 0) { - logger?.LogError($"AsyncFlushPageToDeviceCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncFlushPageToDeviceCallback)} error: {{errorCode}}", errorCode); } PageAsyncFlushResult result = (PageAsyncFlushResult)context; diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/IndexCheckpoint.cs b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/IndexCheckpoint.cs index c73088651c..6664b17b7d 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/IndexCheckpoint.cs +++ 
b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/IndexCheckpoint.cs @@ -161,7 +161,7 @@ private unsafe void AsyncPageFlushCallback(uint errorCode, uint numBytes, object if (errorCode != 0) { - logger?.LogError($"AsyncPageFlushCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncPageFlushCallback)} error: {{errorCode}}", errorCode); } if (Interlocked.Decrement(ref mainIndexCheckpointCallbackCount) == 0) { diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/IndexRecovery.cs b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/IndexRecovery.cs index 4a3812ad78..f2a14b6162 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/IndexRecovery.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/IndexRecovery.cs @@ -148,7 +148,7 @@ private unsafe void AsyncPageReadCallback(uint errorCode, uint numBytes, object { if (errorCode != 0) { - logger?.LogError($"AsyncPageReadCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncPageReadCallback)} error: {{errorCode}}", errorCode); } recoveryCountdown.Decrement(); } diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/Recovery.cs b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/Recovery.cs index e67fb55941..359dd6e197 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/Recovery.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/Recovery.cs @@ -1145,7 +1145,7 @@ private void AsyncFlushPageCallbackForRecovery(uint errorCode, uint numBytes, ob { if (errorCode != 0) { - logger?.LogError($"AsyncFlushPageCallbackForRecovery error: {errorCode}"); + logger?.LogError($"{nameof(AsyncFlushPageCallbackForRecovery)} error: {{errorCode}}", errorCode); } // Set the page status to flushed @@ -1277,7 +1277,7 @@ internal unsafe void AsyncReadPagesCallbackForRecovery(uint errorCode, uint numB { if (errorCode != 0) { - logger?.LogError($"AsyncReadPagesCallbackForRecovery error: {errorCode}"); + logger?.LogError($"{nameof(AsyncReadPagesCallbackForRecovery)} error: 
{{errorCode}}", errorCode); } // Set the page status to "read done" diff --git a/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLog.cs b/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLog.cs index e8feca24f0..85ffe0ef52 100644 --- a/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLog.cs +++ b/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLog.cs @@ -2599,7 +2599,7 @@ private unsafe void AsyncGetFromDiskCallback(uint errorCode, uint numBytes, obje if (errorCode != 0) { - logger?.LogError("AsyncGetFromDiskCallback error: {0}", errorCode); + logger?.LogError($"{nameof(AsyncGetFromDiskCallback)} error: {{errorCode}}", errorCode); ctx.record.Return(); ctx.record = null; ctx.completedRead.Release(); @@ -2611,7 +2611,7 @@ private unsafe void AsyncGetFromDiskCallback(uint errorCode, uint numBytes, obje if (length < 0 || length > allocator.PageSize) { - logger?.LogDebug("Invalid record length found: " + length); + logger?.LogDebug("Invalid record length found: {length}", length); ctx.record.Return(); ctx.record = null; ctx.completedRead.Release(); @@ -2638,7 +2638,7 @@ private void AsyncGetHeaderOnlyFromDiskCallback(uint errorCode, uint numBytes, o if (errorCode != 0) { - logger?.LogError("AsyncGetFromDiskCallback error: {0}", errorCode); + logger?.LogError($"{nameof(AsyncGetHeaderOnlyFromDiskCallback)} error: {{errorCode}}", errorCode); ctx.record.Return(); ctx.record = null; ctx.completedRead.Release(); @@ -2647,7 +2647,7 @@ private void AsyncGetHeaderOnlyFromDiskCallback(uint errorCode, uint numBytes, o { if (ctx.record.available_bytes < headerSize) { - logger?.LogDebug("No record header present at address: " + ctx.logicalAddress); + logger?.LogDebug("No record header present at address: {address}", ctx.logicalAddress); ctx.record.Return(); ctx.record = null; } diff --git a/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLogIterator.cs b/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLogIterator.cs index 
e4610ce365..f7f7159eb0 100644 --- a/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLogIterator.cs +++ b/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLogIterator.cs @@ -677,7 +677,7 @@ private unsafe void AsyncReadPagesCallback(uint errorCode, uint numBytes, object if (errorCode != 0) { - logger?.LogError($"AsyncReadPagesCallback error: {errorCode}"); + logger?.LogError($"{nameof(AsyncReadPagesCallback)} error: {{errorCode}}", errorCode); result.cts?.Cancel(); } diff --git a/libs/storage/Tsavorite/cs/src/core/Utilities/Utility.cs b/libs/storage/Tsavorite/cs/src/core/Utilities/Utility.cs index 9d99eefbae..6950f989f0 100644 --- a/libs/storage/Tsavorite/cs/src/core/Utilities/Utility.cs +++ b/libs/storage/Tsavorite/cs/src/core/Utilities/Utility.cs @@ -68,7 +68,7 @@ internal static int NumBitsPreviousPowerOf2(long v, ILogger logger = null) { long adjustedSize = PreviousPowerOf2(v); if (v != adjustedSize) - logger?.LogError($"Warning: using lower value {adjustedSize} instead of specified value {v}"); + logger?.LogError("Warning: using lower value {adjustedSize} instead of specified value {specifiedValue}", adjustedSize, v); return (int)Math.Log(adjustedSize, 2); } diff --git a/playground/ClusterStress/ShardedRespOnlineBench.cs b/playground/ClusterStress/ShardedRespOnlineBench.cs index 1f13ff23c9..ebcbd6b9ae 100644 --- a/playground/ClusterStress/ShardedRespOnlineBench.cs +++ b/playground/ClusterStress/ShardedRespOnlineBench.cs @@ -398,7 +398,7 @@ public static void MigrateSlots(ConnectionMultiplexer redis, IPEndPoint source, { var resp = server.Execute("migrate", args); if (!resp.Equals("OK")) - logger?.LogError(resp.ToString()); + logger?.LogError("{errorMessage}", resp.ToString()); } catch (Exception ex) { @@ -467,7 +467,7 @@ public void Run() $"{"total_ops;",pad}" + $"{"iter_tops;",pad}" + $"{"tpt (Kops/sec)",pad}"; - logger.Log(LogLevel.Information, histogramHeader); + logger.Log(LogLevel.Information, "{msg}", histogramHeader); } } @@ -512,7 
+512,7 @@ public void Run() $"{summary.TotalCount,pad}" + $"{curr_iter_ops,pad}" + $"{Math.Round(BatchSize * curr_iter_ops / elapsedSecs, 2),pad}"; - logger.Log(LogLevel.Information, histogramOutput); + logger.Log(LogLevel.Information, "{msg}", histogramOutput); } last_iter_ops = summary.TotalCount; @@ -657,7 +657,7 @@ public async void OpRunnerGarnetClientSession(int thread_id) { //if(e.ToString().Contains()) if (e.ToString().Contains("CLUSTERDOWN")) - logger?.LogError(e, null); + logger?.LogError(e, "An error has occurred"); } long elapsed = Stopwatch.GetTimestamp() - startTimestamp; diff --git a/playground/CommandInfoUpdater/CommandInfoUpdater.cs b/playground/CommandInfoUpdater/CommandInfoUpdater.cs index 0d23152565..3f43720f59 100644 --- a/playground/CommandInfoUpdater/CommandInfoUpdater.cs +++ b/playground/CommandInfoUpdater/CommandInfoUpdater.cs @@ -37,7 +37,7 @@ public static bool TryUpdateCommandInfo(string outputPath, int respServerPort, I new Dictionary(); if (!force && !RespCommandsInfo.TryGetRespCommandsInfo(out existingCommandsInfo, false, logger)) { - logger.LogError($"Unable to get existing RESP commands info."); + logger.LogError("Unable to get existing RESP commands info."); return false; } @@ -46,14 +46,14 @@ public static bool TryUpdateCommandInfo(string outputPath, int respServerPort, I if (!GetUserConfirmation(commandsToAdd, commandsToRemove, logger)) { - logger.LogInformation($"User cancelled update operation."); + logger.LogInformation("User cancelled update operation."); return false; } if (!TryGetRespCommandsInfo(GarnetCommandInfoJsonPath, logger, out var garnetCommandsInfo) || garnetCommandsInfo == null) { - logger.LogError($"Unable to read Garnet RESP commands info from {GarnetCommandInfoJsonPath}."); + logger.LogError("Unable to read Garnet RESP commands info from {GarnetCommandInfoJsonPath}.", GarnetCommandInfoJsonPath); return false; } @@ -98,12 +98,11 @@ public static bool TryUpdateCommandInfo(string outputPath, int respServerPort, I 
if (!TryWriteRespCommandsInfo(outputPath, updatedCommandsInfo, logger)) { - logger.LogError($"Unable to write RESP commands info to path {outputPath}."); + logger.LogError("Unable to write RESP commands info to path {outputPath}.", outputPath); return false; } - logger.LogInformation( - $"RESP commands info updated successfully! Output file written to: {Path.GetFullPath(outputPath)}"); + logger.LogInformation("RESP commands info updated successfully! Output file written to: {fullOutputPath}", Path.GetFullPath(outputPath)); return true; } @@ -240,18 +239,16 @@ private static bool GetUserConfirmation(IDictionary comm var logSubCommandsToRemove = commandsToRemove.Where(c => c.Key.SubCommands != null) .SelectMany(c => c.Key.SubCommands!).ToList(); - logger.LogInformation( - $"Found {logCommandsToAdd.Count} commands to add and {logSubCommandsToAdd.Count} sub-commands to add."); + logger.LogInformation("Found {logCommandsToAddCount} commands to add and {logSubCommandsToAddCount} sub-commands to add.", logCommandsToAdd.Count, logSubCommandsToAdd.Count); if (logCommandsToAdd.Count > 0) - logger.LogInformation($"Commands to add: {string.Join(", ", logCommandsToAdd)}"); + logger.LogInformation("Commands to add: {commands}", string.Join(", ", logCommandsToAdd)); if (logSubCommandsToAdd.Count > 0) - logger.LogInformation($"Sub-Commands to add: {string.Join(", ", logSubCommandsToAdd)}"); - logger.LogInformation( - $"Found {logCommandsToRemove.Count} commands to remove and {logSubCommandsToRemove.Count} sub-commands to commandsToRemove."); + logger.LogInformation("Sub-Commands to add: {commands}", string.Join(", ", logSubCommandsToAdd)); + logger.LogInformation("Found {logCommandsToRemoveCount} commands to remove and {logSubCommandsToRemoveCount} sub-commands to remove.", logCommandsToRemove.Count, logSubCommandsToRemove.Count); if (logCommandsToRemove.Count > 0) - 
logger.LogInformation("Commands to remove: {commands}", string.Join(", ", logCommandsToRemove)); if (logSubCommandsToRemove.Count > 0) - logger.LogInformation($"Sub-Commands to remove: {string.Join(", ", logSubCommandsToRemove)}"); + logger.LogInformation("Sub-Commands to remove: {commands}", string.Join(", ", logSubCommandsToRemove)); if (logCommandsToAdd.Count == 0 && logSubCommandsToAdd.Count == 0 && logCommandsToRemove.Count == 0 && logSubCommandsToRemove.Count == 0) @@ -326,7 +323,7 @@ private static unsafe bool TryGetCommandsInfo(string[] commandsToQuery, int resp // Read the array length (# of commands info returned) if (!RespReadUtils.ReadUnsignedArrayLength(out var cmdCount, ref ptr, end)) { - logger.LogError($"Unable to read RESP command info count from server"); + logger.LogError("Unable to read RESP command info count from server"); return false; } @@ -336,8 +333,7 @@ private static unsafe bool TryGetCommandsInfo(string[] commandsToQuery, int resp if (!RespCommandInfoParser.TryReadFromResp(ref ptr, end, supportedCommands, out var command) || command == null) { - logger.LogError( - $"Unable to read RESP command info from server for command {commandsToQuery[cmdIdx]}"); + logger.LogError("Unable to read RESP command info from server for command {command}", commandsToQuery[cmdIdx]); return false; } diff --git a/playground/Embedded.perftest/EmbeddedPerformanceTest.cs b/playground/Embedded.perftest/EmbeddedPerformanceTest.cs index 881dcc72a4..8cf9e277f0 100644 --- a/playground/Embedded.perftest/EmbeddedPerformanceTest.cs +++ b/playground/Embedded.perftest/EmbeddedPerformanceTest.cs @@ -127,7 +127,7 @@ public unsafe double WorkerLoop(int threadId) // Pre-generate a random operation sequence (to exclude RNG from main benchmark loop) // - logger?.LogInformation($"[{threadId:00}] Generating operation sequence with {batchSize} operations per batch"); + logger?.LogInformation("[{threadId:00}] Generating operation sequence with {batchSize} operations per batch", 
threadId, batchSize); Random rng = new(opts.Seed + threadId); diff --git a/playground/SampleModule/SampleModule.cs b/playground/SampleModule/SampleModule.cs index f938f9ba5a..4bf1c535f0 100644 --- a/playground/SampleModule/SampleModule.cs +++ b/playground/SampleModule/SampleModule.cs @@ -15,7 +15,7 @@ public override void OnLoad(ModuleLoadContext context, string[] args) var status = context.Initialize("SampleModule", 1); if (status != ModuleActionStatus.Success) { - context.Logger?.LogError($"Failed to initialize SampleModule. Error {status}"); + context.Logger?.LogError("Failed to initialize SampleModule. Error {status}", status); return; } diff --git a/test/Garnet.test.cluster/ClusterMigrateTests.cs b/test/Garnet.test.cluster/ClusterMigrateTests.cs index 412537d33a..227ed70733 100644 --- a/test/Garnet.test.cluster/ClusterMigrateTests.cs +++ b/test/Garnet.test.cluster/ClusterMigrateTests.cs @@ -572,7 +572,7 @@ public void ClusterRedirectMessage() [Category("CLUSTER")] public void ClusterSimpleMigrateSlots() { - context.logger.LogDebug($"0. ClusterSimpleMigrateSlotsTest started"); + context.logger.LogDebug("0. ClusterSimpleMigrateSlotsTest started"); var Port = TestUtils.Port; var Shards = defaultShards; context.CreateInstances(Shards, useTLS: UseTLS); @@ -585,7 +585,7 @@ public void ClusterSimpleMigrateSlots() msp = ClusterTestUtils.MergeSlotPortMap(msp, context.clusterTestUtils.GetSlotPortMapFromNode(i, context.logger)); Assert.AreEqual(msp.Count, 16384); - context.logger.LogDebug($"1. Creating data"); + context.logger.LogDebug("1. Creating data"); var keyCount = 100; var slot = CreateSingleSlotData(keyLen: 16, valueLen: 16, keyTagEnd: 6, keyCount, out var data); var sourceIndex = context.clusterTestUtils.GetSourceNodeIndexFromSlot((ushort)slot, context.logger); @@ -608,11 +608,11 @@ public void ClusterSimpleMigrateSlots() Assert.AreEqual((ushort)slot, _slot); } - context.logger.LogDebug($"3. Initiating async migration"); + context.logger.LogDebug("3. 
Initiating async migration"); // Initiate Migration context.clusterTestUtils.MigrateSlots(sourcePort, targetPort, new List() { slot }, logger: context.logger); - context.logger.LogDebug($"4. Checking keys starting"); + context.logger.LogDebug("4. Checking keys starting"); // Wait for keys to become available for reading var keysList = data.Keys.ToList(); for (var i = 0; i < keysList.Count; i++) @@ -627,9 +627,9 @@ public void ClusterSimpleMigrateSlots() Assert.AreEqual(targetPort, _port, $"[{sourcePort}] => [{targetPort}] == {_port} | expected: {targetPort}, actual: {_port}"); Assert.AreEqual(data[keysList[i]], Encoding.ASCII.GetBytes(value), $"[{sourcePort}] => [{targetPort}] == {_port} | expected: {Encoding.ASCII.GetString(data[keysList[i]])}, actual: {value}"); } - context.logger.LogDebug($"5. Checking keys done"); + context.logger.LogDebug("5. Checking keys done"); - context.logger.LogDebug($"6. Checking configuration update starting"); + context.logger.LogDebug("6. Checking configuration update starting"); // Check if configuration has updated by var otherPorts = context.clusterTestUtils.GetEndPoints().Select(x => ((IPEndPoint)x).Port).Where(x => x != sourcePort || x != targetPort); while (true) @@ -651,16 +651,16 @@ public void ClusterSimpleMigrateSlots() break; } - context.logger.LogDebug($"7. Checking configuration update done"); + context.logger.LogDebug("7. Checking configuration update done"); context.clusterTestUtils.WaitForMigrationCleanup(context.logger); - context.logger.LogDebug($"8. ClusterSimpleMigrateSlotsTest done"); + context.logger.LogDebug("8. ClusterSimpleMigrateSlotsTest done"); } [Test, Order(7)] [Category("CLUSTER")] public void ClusterSimpleMigrateSlotsExpiry() { - context.logger.LogDebug($"0. ClusterSimpleMigrateSlotsExpiryTest started"); + context.logger.LogDebug("0. 
ClusterSimpleMigrateSlotsExpiryTest started"); context.CreateInstances(defaultShards, useTLS: UseTLS); context.CreateConnection(useTLS: UseTLS); _ = context.clusterTestUtils.SimpleSetupCluster(logger: context.logger); @@ -672,7 +672,7 @@ public void ClusterSimpleMigrateSlotsExpiry() var keyCountRet = context.clusterTestUtils.CountKeysInSlot(slot); Assert.AreEqual(keyCountRet, keyExpiryCount / 2); - context.logger.LogDebug($"2. Count keys in slot after expiry"); + context.logger.LogDebug("2. Count keys in slot after expiry"); keyCountRet = 100; context.logger.LogDebug("3. Creating slot data {keyCountRet} with expiry started", keyCountRet); @@ -685,21 +685,21 @@ public void ClusterSimpleMigrateSlotsExpiry() context.logger.LogDebug("4. Creating slot data {keyCountRet} with expiry done", keyCountRet); - context.logger.LogDebug($"5. Initiating migration"); + context.logger.LogDebug("5. Initiating migration"); context.clusterTestUtils.MigrateSlots(context.clusterTestUtils.GetEndPoint(sourceNodeIndex), context.clusterTestUtils.GetEndPoint(targetNodeIndex), new List() { _slot }, logger: context.logger); - context.logger.LogDebug($"6. Finished migration"); + context.logger.LogDebug("6. Finished migration"); - context.logger.LogDebug($"7. Checking migrating keys started"); + context.logger.LogDebug("7. Checking migrating keys started"); do { _ = Thread.Yield(); keyCountRet = context.clusterTestUtils.CountKeysInSlot(targetNodeIndex, slot, context.logger); } while (keyCountRet == -1 || keyCountRet > keyExpiryCount / 2); Assert.AreEqual(keyExpiryCount / 2, keyCountRet); - context.logger.LogDebug($"8. Checking migrating keys done"); + context.logger.LogDebug("8. Checking migrating keys done"); context.clusterTestUtils.WaitForMigrationCleanup(context.logger); - context.logger.LogDebug($"9. ClusterSimpleMigrateSlotsExpiryTest done"); + context.logger.LogDebug("9. 
ClusterSimpleMigrateSlotsExpiryTest done"); } private (string, List>) DoZADD(int nodeIndex, byte[] key, int memberCount, int memberSize = 8, int scoreMin = int.MinValue, int scoreMax = int.MaxValue) @@ -846,7 +846,7 @@ private string DoZCOUNT(int nodeIndex, byte[] key, out int count, out string add [Category("CLUSTER")] public void ClusterSimpleMigrateSlotsWithObjects() { - context.logger.LogDebug($"0. ClusterSimpleMigrateSlotsWithObjectsTest started"); + context.logger.LogDebug("0. ClusterSimpleMigrateSlotsWithObjectsTest started"); var Port = TestUtils.Port; var Shards = defaultShards; context.CreateInstances(defaultShards, useTLS: UseTLS); @@ -863,7 +863,7 @@ public void ClusterSimpleMigrateSlotsWithObjects() var memberCount = 10; Assert.AreEqual(7638, slot); - context.logger.LogDebug($"1. Loading object keys data started"); + context.logger.LogDebug("1. Loading object keys data started"); List> memberPair; (_, memberPair) = DoZADD(sourceNodeIndex, key, memberCount); var resp = DoZCOUNT(sourceNodeIndex, key, out var count, out var _Address, out var _Port, out var _Slot, logger: context.logger); @@ -873,7 +873,7 @@ public void ClusterSimpleMigrateSlotsWithObjects() (resp, members) = DoZRANGE(sourceNodeIndex, key, out _Address, out _Port, out _Slot, context.logger); Assert.AreEqual(memberPair.Select(x => x.Item2).ToList(), members); - context.logger.LogDebug($"2. Loading object keys data done"); + context.logger.LogDebug("2. Loading object keys data done"); var sourceEndPoint = context.clusterTestUtils.GetEndPoint(sourceNodeIndex); var targetEndPoint = context.clusterTestUtils.GetEndPoint(targetNodeIndex); @@ -881,7 +881,7 @@ public void ClusterSimpleMigrateSlotsWithObjects() context.clusterTestUtils.MigrateSlots(context.clusterTestUtils.GetEndPoint(sourceNodeIndex), context.clusterTestUtils.GetEndPoint(targetNodeIndex), new List() { slot }, logger: context.logger); context.logger.LogDebug("4. 
Migrating slot {slot} started {sourceEndPoint.Port} to {targetEndPoint.Port} done", slot, sourceEndPoint.Port, targetEndPoint.Port); - context.logger.LogDebug($"5. Checking migrated keys started"); + context.logger.LogDebug("5. Checking migrated keys started"); count = 0; do { @@ -890,19 +890,19 @@ public void ClusterSimpleMigrateSlotsWithObjects() while (!resp.Equals("OK")); Assert.AreEqual(count, memberCount); - context.logger.LogDebug($"6. Checking migrated keys done"); + context.logger.LogDebug("6. Checking migrated keys done"); (resp, members) = DoZRANGE(targetNodeIndex, key, out _Address, out _Port, out _Slot); Assert.AreEqual(memberPair.Select(x => Encoding.ASCII.GetString(x.Item2)).ToList(), members); context.clusterTestUtils.WaitForMigrationCleanup(context.logger); - context.logger.LogDebug($"7. ClusterSimpleMigrateSlotsWithObjectsTest done"); + context.logger.LogDebug("7. ClusterSimpleMigrateSlotsWithObjectsTest done"); } [Test, Order(9)] [Category("CLUSTER")] public void ClusterSimpleMigrateKeys() { - context.logger.LogDebug($"0. ClusterSimpleMigrateKeysTest started"); + context.logger.LogDebug("0. ClusterSimpleMigrateKeysTest started"); context.CreateInstances(defaultShards, useTLS: UseTLS); context.CreateConnection(useTLS: UseTLS); _ = context.clusterTestUtils.SimpleSetupCluster(logger: context.logger); @@ -952,9 +952,9 @@ public void ClusterSimpleMigrateKeys() Assert.AreEqual(keys, keysInSlot); context.logger.LogDebug("6. GetKeysInSlot {keysInSlot.Count}", keysInSlot.Count); - context.logger.LogDebug($"7. MigrateKeys starting"); + context.logger.LogDebug("7. MigrateKeys starting"); context.clusterTestUtils.MigrateKeys(context.clusterTestUtils.GetEndPoint(sourceNodeIndex), context.clusterTestUtils.GetEndPoint(targetNodeIndex), keysInSlot, context.logger); - context.logger.LogDebug($"8. MigrateKeys done"); + context.logger.LogDebug("8. 
MigrateKeys done"); var respNodeTarget = context.clusterTestUtils.SetSlot(targetNodeIndex, _workingSlot, "NODE", targetNodeId, logger: context.logger); Assert.AreEqual(respNodeTarget, "OK"); @@ -1003,14 +1003,14 @@ public void ClusterSimpleMigrateKeys() context.logger.LogDebug("14. Checking migrate keys done"); context.clusterTestUtils.WaitForMigrationCleanup(context.logger); - context.logger.LogDebug($"15. ClusterSimpleMigrateKeysTest done"); + context.logger.LogDebug("15. ClusterSimpleMigrateKeysTest done"); } [Test, Order(10)] [Category("CLUSTER")] public void ClusterSimpleMigrateKeysWithObjects() { - context.logger.LogDebug($"0. ClusterSimpleMigrateKeysWithObjectsTest started"); + context.logger.LogDebug("0. ClusterSimpleMigrateKeysWithObjectsTest started"); var Port = TestUtils.Port; var Shards = defaultShards; context.CreateInstances(Shards, useTLS: UseTLS); @@ -1049,7 +1049,7 @@ public void ClusterSimpleMigrateKeysWithObjects() string _Address; int _Port; int _Slot; - context.logger.LogDebug($"3. Checking keys before migration started"); + context.logger.LogDebug("3. Checking keys before migration started"); foreach (var _key in data.Keys) { var resp = DoZCOUNT(sourceNodeIndex, key, out var count, out _Address, out _Port, out _Slot, logger: context.logger); @@ -1060,9 +1060,9 @@ public void ClusterSimpleMigrateKeysWithObjects() (resp, members) = DoZRANGE(sourceNodeIndex, _key, out _Address, out _Port, out _Slot); var expectedMembers = data[_key].Select(x => Encoding.ASCII.GetString(x.Item2)).ToList(); Assert.AreEqual(expectedMembers, members); - context.logger.LogDebug($"2. Loading object keys data done"); + context.logger.LogDebug("2. Loading object keys data done"); } - context.logger.LogDebug($"4. Checking keys before migration done"); + context.logger.LogDebug("4. 
Checking keys before migration done"); // Start Migration var respImport = context.clusterTestUtils.SetSlot(targetNodeIndex, _slot, "IMPORTING", sourceNodeId, logger: context.logger); @@ -1081,9 +1081,9 @@ public void ClusterSimpleMigrateKeysWithObjects() Assert.AreEqual(keys, keysInSlot); context.logger.LogDebug("8. GetKeysInSlot {keysInSlot.Count}", keysInSlot.Count); - context.logger.LogDebug($"9. MigrateKeys starting"); + context.logger.LogDebug("9. MigrateKeys starting"); context.clusterTestUtils.MigrateKeys(context.clusterTestUtils.GetEndPoint(sourceNodeIndex), context.clusterTestUtils.GetEndPoint(targetNodeIndex), keysInSlot, context.logger); - context.logger.LogDebug($"10. MigrateKeys done"); + context.logger.LogDebug("10. MigrateKeys done"); var respNodeTarget = context.clusterTestUtils.SetSlot(targetNodeIndex, _slot, "NODE", targetNodeId, logger: context.logger); Assert.AreEqual(respNodeTarget, "OK"); @@ -1120,11 +1120,11 @@ public void ClusterSimpleMigrateKeysWithObjects() (resp, members) = DoZRANGE(targetNodeIndex, _key, out _Address, out _Port, out _Slot, context.logger); var expectedMembers = data[_key].Select(x => Encoding.ASCII.GetString(x.Item2)).ToList(); Assert.AreEqual(expectedMembers, members); - context.logger.LogDebug($"2. Loading object keys data done"); + context.logger.LogDebug("2. Loading object keys data done"); } context.logger.LogDebug("15. Checking migrate keys done"); context.clusterTestUtils.WaitForMigrationCleanup(context.logger); - context.logger.LogDebug($"16. ClusterSimpleMigrateKeysWithObjectsTest done"); + context.logger.LogDebug("16. ClusterSimpleMigrateKeysWithObjectsTest done"); } private void MigrateSlotsTask(int sourceNodePort, int targetNodePort, List slots, ILogger logger = null) @@ -1238,7 +1238,7 @@ private void OperateOnSlotsTask(Dictionary> data [Category("CLUSTER")] public void ClusterSimpleMigrateWithReadWrite() { - context.logger.LogDebug($"0. 
ClusterSimpleMigrateTestWithReadWrite started"); + context.logger.LogDebug("0. ClusterSimpleMigrateTestWithReadWrite started"); var Shards = defaultShards; context.CreateInstances(Shards, useTLS: UseTLS); context.CreateConnection(useTLS: UseTLS); @@ -1261,7 +1261,7 @@ public void ClusterSimpleMigrateWithReadWrite() new HashSet(context.clusterTestUtils.GetOwnedSlotsFromNode(sourceNodeIndex, context.logger))); context.logger.LogDebug("2. CreateMultiSlotData {keyCount} done", keyCount); - context.logger.LogDebug($"2. Running workload and migration task"); + context.logger.LogDebug("2. Running workload and migration task"); var migratedSlots = data.Keys.ToList(); MigrateSlotsTask( context.clusterTestUtils.GetEndPoint(sourceNodeIndex).Port, @@ -1269,9 +1269,9 @@ public void ClusterSimpleMigrateWithReadWrite() migratedSlots, logger: context.logger); OperateOnSlotsTask(data, targetNodeIndex); - context.logger.LogDebug($"3. Migration and workload done"); + context.logger.LogDebug("3. Migration and workload done"); - context.logger.LogDebug($"4. Checking keys after migration started"); + context.logger.LogDebug("4. Checking keys after migration started"); foreach (var entry in operatedOnData) { var key = entry.Item2; @@ -1289,8 +1289,8 @@ public void ClusterSimpleMigrateWithReadWrite() Assert.AreEqual(context.clusterTestUtils.GetEndPoint(targetNodeIndex).Address.ToString(), address); } - context.logger.LogDebug($"5. Checking keys after migration done"); - context.logger.LogDebug($"6. ClusterSimpleMigrateTestWithReadWrite done"); + context.logger.LogDebug("5. Checking keys after migration done"); + context.logger.LogDebug("6. ClusterSimpleMigrateTestWithReadWrite done"); context.clusterTestUtils.WaitForMigrationCleanup(context.logger); } @@ -1344,7 +1344,7 @@ public void ClusterSimpleTxn() [TestCaseSource("_slotranges")] public void ClusterSimpleMigrateSlotsRanges(List migrateRange) { - context.logger.LogDebug($"0. 
ClusterSimpleMigrateSlotsRanges started"); + context.logger.LogDebug("0. ClusterSimpleMigrateSlotsRanges started"); var Shards = defaultShards; context.CreateInstances(Shards, useTLS: UseTLS); context.CreateConnection(useTLS: UseTLS); @@ -1396,7 +1396,7 @@ public void ClusterSimpleMigrateSlotsRanges(List migrateRange) [TestCaseSource("_slotranges")] public void ClusterSimpleMigrateWithAuth(List migrateRange) { - context.logger.LogDebug($"0. ClusterSimpleMigrateWithAuth started"); + context.logger.LogDebug("0. ClusterSimpleMigrateWithAuth started"); var Shards = defaultShards; context.CreateInstances(Shards, useTLS: UseTLS); context.CreateConnection(useTLS: UseTLS); @@ -1448,7 +1448,7 @@ public void ClusterSimpleMigrateWithAuth(List migrateRange) [Category("CLUSTER")] public void ClusterAllowWritesDuringMigrateTest() { - context.logger.LogDebug($"0. ClusterSimpleMigrateTestWithReadWrite started"); + context.logger.LogDebug("0. ClusterSimpleMigrateTestWithReadWrite started"); var Shards = defaultShards; context.CreateInstances(Shards, useTLS: UseTLS); context.CreateConnection(useTLS: UseTLS); @@ -1577,7 +1577,7 @@ Task WriteWorkload(IPEndPoint endPoint, byte[] key, int keyLen = 16) [Category("CLUSTER")] public void ClusterMigrateForgetTest() { - context.logger.LogDebug($"0. ClusterSimpleMigrateSlotsRanges started"); + context.logger.LogDebug("0. 
ClusterSimpleMigrateSlotsRanges started"); var Shards = defaultShards; context.CreateInstances(Shards, useTLS: UseTLS); context.CreateConnection(useTLS: UseTLS); diff --git a/test/Garnet.test.cluster/ClusterTestUtils.cs b/test/Garnet.test.cluster/ClusterTestUtils.cs index 128a4ff477..cb90c9343a 100644 --- a/test/Garnet.test.cluster/ClusterTestUtils.cs +++ b/test/Garnet.test.cluster/ClusterTestUtils.cs @@ -2650,7 +2650,7 @@ public void WaitForReplicaAofSync(int primaryIndex, int secondaryIndex, ILogger break; BackOff(); } - logger?.LogInformation($"Replication offset for primary {primaryIndex} and secondary {secondaryIndex} is {primaryReplicationOffset}"); + logger?.LogInformation("Replication offset for primary {primaryIndex} and secondary {secondaryIndex} is {primaryReplicationOffset}", primaryIndex, secondaryIndex, primaryReplicationOffset); } public void WaitForConnectedReplicaCount(int primaryIndex, long minCount, ILogger logger = null)