From b40e38ea1441bf76609e8503f336410d591251a5 Mon Sep 17 00:00:00 2001
From: mphanias
Date: Thu, 23 Nov 2023 15:00:21 +0530
Subject: [PATCH 1/3] OM86 - initial check-in: support for micro-benchmarks

Issue micro-benchmark commands during the latency-watcher passTwoKeys phase.
The namespace-watcher and node-watcher record the respective namespace and
service benchmark flags, which are later consumed by the latency-watcher.
---
 watcher_latency.go    | 69 ++++++++++++++++++++++++++++++++++++++++++-
 watcher_namespaces.go | 10 +++++++
 watcher_node_stats.go |  5 ++++
 3 files changed, 83 insertions(+), 1 deletion(-)

diff --git a/watcher_latency.go b/watcher_latency.go
index 10125525..59cd9bee 100644
--- a/watcher_latency.go
+++ b/watcher_latency.go
@@ -1,6 +1,8 @@
 package main
 
 import (
+	"strings"
+
 	"github.com/prometheus/client_golang/prometheus"
 
 	log "github.com/sirupsen/logrus"
@@ -9,6 +11,8 @@ import (
 type LatencyWatcher struct {
 }
 
+var LatencyBenchmarks = make(map[string]float64)
+
 func (lw *LatencyWatcher) describe(ch chan<- *prometheus.Desc) {}
 
 func (lw *LatencyWatcher) passOneKeys() []string {
@@ -33,7 +37,7 @@ func (lw *LatencyWatcher) passTwoKeys(rawMetrics map[string]string) (latencyComm
 	}
 
 	if ok {
-		return []string{"latencies:"}
+		return lw.getLatenciesCommands(rawMetrics)
 	}
 
 	return []string{"latency:"}
@@ -99,3 +103,66 @@ func (lw *LatencyWatcher) refresh(o *Observer, infoKeys []string, rawMetrics map
 
 	return nil
 }
+
+// Utility methods
+// checks whether a stat can be considered for latency stat retrieval
+func canConsiderLatencyCommand(stat string) bool {
+	return (strings.Contains(stat, "enable-benchmarks-") ||
+		strings.Contains(stat, "enable-hist-")) // hist-proxy & hist-info - both at service level
+}
+
+func (lw *LatencyWatcher) getLatenciesCommands(rawMetrics map[string]string) []string {
+	var commands = []string{"latencies:"}
+
+	// the latency commands below are added on top of the auto-enabled list, i.e. the plain latencies: command
+	// re-repl is auto-enabled, but it is not returned as part of the latencies: list, hence it is added explicitly
+	//
+	// Hashmap content format := <namespace>-<stat-name> = <0/1>
+	for ns_latency_enabled_benchmark := range LatencyBenchmarks {
+		l_value := LatencyBenchmarks[ns_latency_enabled_benchmark]
+		// only if enabled, fetch the metrics
+		if l_value == 1 {
+			// if enable-hist-proxy
+			//    command = latencies:hist={test}-proxy
+			// else if enable-benchmarks-fabric
+			//    command = latencies:hist=benchmarks-fabric
+			// else if re-repl
+			//    command = latencies:hist={test}-re-repl
+
+			if strings.Contains(ns_latency_enabled_benchmark, "re-repl") {
+				// Exception case
+				ns := strings.Split(ns_latency_enabled_benchmark, "-")[0]
+				l_command := "latencies:hist={" + ns + "}-re-repl"
+				commands = append(commands, l_command)
+			} else if strings.Contains(ns_latency_enabled_benchmark, "enable-hist-proxy") {
+				// Exception case
+				ns := strings.Split(ns_latency_enabled_benchmark, "-")[0]
+				l_command := "latencies:hist={" + ns + "}-proxy"
+				commands = append(commands, l_command)
+			} else if strings.Contains(ns_latency_enabled_benchmark, "enable-benchmarks-fabric") {
+				// Exception case
+				l_command := "latencies:hist=benchmarks-fabric"
+				commands = append(commands, l_command)
+			} else if strings.Contains(ns_latency_enabled_benchmark, "enable-hist-info") {
+				// Exception case
+				l_command := "latencies:hist=info"
+				commands = append(commands, l_command)
+			} else if strings.Contains(ns_latency_enabled_benchmark, "-benchmarks-") {
+				// remaining enabled benchmark latencies like
+				// enable-benchmarks-ops-sub, enable-benchmarks-read, enable-benchmarks-write,
+				// enable-benchmarks-udf, enable-benchmarks-udf-sub, enable-benchmarks-batch-sub
+
+				// key format := test-enable-benchmarks-read (or) test-enable-hist-proxy
+				ns := strings.Split(ns_latency_enabled_benchmark, "-")[0]
+				benchmarks_start_index := strings.LastIndex(ns_latency_enabled_benchmark, "-benchmarks-")
+				l_command := ns_latency_enabled_benchmark[benchmarks_start_index:]
+				l_command = "latencies:hist={" + ns + "}" + l_command
+				commands = append(commands, l_command)
+			}
+		}
+	}
+
+	log.Tracef("latency-passtwokeys:%s", commands)
+
+	return commands
+}
diff --git a/watcher_namespaces.go b/watcher_namespaces.go
index 17133e84..fc5c3b56 100644
--- a/watcher_namespaces.go
+++ b/watcher_namespaces.go
@@ -210,6 +210,16 @@ func (nw *NamespaceWatcher) refreshNamespaceStats(singleInfoKey string, infoKeys
 
 			// push to prom-channel
 			pushToPrometheus(asMetric, pv, labels, labelValues, ch)
 		}
+
+		// the section below ensures the namespace+latencies combination is handled by the LatencyWatcher
+		//
+		// check if this is a latency benchmark stat and whether it is enabled (bool true==1 and false==0 after conversion)
+		if canConsiderLatencyCommand(stat) {
+			LatencyBenchmarks[nsName+"-"+stat] = pv
+		}
+		// always add the default re-repl entry: it is auto-enabled but not returned as part of latencies:, and the namespace name is only available here
+		LatencyBenchmarks[nsName+"-re-repl"] = 1
+
 	}
 }
diff --git a/watcher_node_stats.go b/watcher_node_stats.go
index 6ab99b62..09e75d06 100644
--- a/watcher_node_stats.go
+++ b/watcher_node_stats.go
@@ -75,5 +75,10 @@ func (sw *StatsWatcher) handleRefresh(o *Observer, nodeRawMetrics string, cluste
 
 		pushToPrometheus(asMetric, pv, labels, labelsValues, ch)
 
+		// check if this is a latency benchmark stat and whether it is enabled (bool true==1 and false==0 after conversion)
+		if canConsiderLatencyCommand(stat) {
+			LatencyBenchmarks["service-"+stat] = pv
+		}
+
 	}
 }
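
Reviewer note (illustration, not part of the patch): a minimal standalone sketch of how entries in LatencyBenchmarks are expected to map to latencies: info commands under getLatenciesCommands above, assuming a namespace named "test". commandFor is a hypothetical helper that mirrors the branch logic of the patch for a single map key.

package main

import (
	"fmt"
	"strings"
)

// commandFor mirrors the branch order of getLatenciesCommands for one
// LatencyBenchmarks key (hypothetical helper, for illustration only).
func commandFor(key string) string {
	ns := strings.Split(key, "-")[0]
	switch {
	case strings.Contains(key, "re-repl"):
		return "latencies:hist={" + ns + "}-re-repl"
	case strings.Contains(key, "enable-hist-proxy"):
		return "latencies:hist={" + ns + "}-proxy"
	case strings.Contains(key, "enable-benchmarks-fabric"):
		return "latencies:hist=benchmarks-fabric"
	case strings.Contains(key, "enable-hist-info"):
		return "latencies:hist=info"
	case strings.Contains(key, "-benchmarks-"):
		return "latencies:hist={" + ns + "}" + key[strings.LastIndex(key, "-benchmarks-"):]
	}
	return ""
}

func main() {
	// keys as the namespace-watcher and node-watcher would record them
	keys := []string{
		"test-re-repl",                     // -> latencies:hist={test}-re-repl
		"test-enable-hist-proxy",           // -> latencies:hist={test}-proxy
		"test-enable-benchmarks-read",      // -> latencies:hist={test}-benchmarks-read
		"service-enable-benchmarks-fabric", // -> latencies:hist=benchmarks-fabric
		"service-enable-hist-info",         // -> latencies:hist=info
	}
	for _, k := range keys {
		fmt.Println(k, "->", commandFor(k))
	}
}
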
From 70180fa39e3d21feea0ef9df6f0edd0917cafa59 Mon Sep 17 00:00:00 2001
From: mphanias
Date: Thu, 23 Nov 2023 16:51:43 +0530
Subject: [PATCH 2/3] OM86

---
 watcher_latency.go | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/watcher_latency.go b/watcher_latency.go
index 59cd9bee..76b5aa18 100644
--- a/watcher_latency.go
+++ b/watcher_latency.go
@@ -60,6 +60,18 @@ func (lw *LatencyWatcher) refresh(o *Observer, infoKeys []string, rawMetrics map
 		}
 	}
 
+	// loop all the latency infokeys
+	for ik := range infoKeys {
+		parseSingleLatenciesKey(infoKeys[ik], rawMetrics, allowedLatenciesList, blockedLatenciessList, ch)
+	}
+
+	return nil
+}
+
+func parseSingleLatenciesKey(singleLatencyKey string, rawMetrics map[string]string,
+	allowedLatenciesList map[string]struct{},
+	blockedLatenciessList map[string]struct{}, ch chan<- prometheus.Metric) error {
+
 	var latencyStats map[string]StatsMap
 
 	if rawMetrics["latencies:"] != "" {

From c663723162b43bb7df6625200713f07ae68bec7d Mon Sep 17 00:00:00 2001
From: mphanias
Date: Thu, 23 Nov 2023 17:00:10 +0530
Subject: [PATCH 3/3] OM86

---
 watcher_latency.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/watcher_latency.go b/watcher_latency.go
index 76b5aa18..55aa0043 100644
--- a/watcher_latency.go
+++ b/watcher_latency.go
@@ -75,7 +75,7 @@ func parseSingleLatenciesKey(singleLatencyKey string, rawMetrics map[string]stri
 	var latencyStats map[string]StatsMap
 
 	if rawMetrics["latencies:"] != "" {
-		latencyStats = parseLatencyInfo(rawMetrics["latencies:"], int(config.Aerospike.LatencyBucketsCount))
+		latencyStats = parseLatencyInfo(rawMetrics[singleLatencyKey], int(config.Aerospike.LatencyBucketsCount))
 	} else {
 		latencyStats = parseLatencyInfoLegacy(rawMetrics["latency:"], int(config.Aerospike.LatencyBucketsCount))
 	}
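
Reviewer note (illustration, not part of the patch): a small sketch of why the PATCH 3/3 fix matters. With micro-benchmarks enabled, passTwoKeys now returns several latencies: commands, and each one has its own entry in rawMetrics, so parseSingleLatenciesKey must read rawMetrics[singleLatencyKey]; always reading rawMetrics["latencies:"] would parse the same payload for every key. The map contents below are hypothetical placeholders.

package main

import "fmt"

func main() {
	// hypothetical raw info output: one entry per latencies command issued in passTwoKeys
	rawMetrics := map[string]string{
		"latencies:":                       "{test}-read:...;{test}-write:...",
		"latencies:hist={test}-proxy":      "{test}-proxy:...",
		"latencies:hist=benchmarks-fabric": "benchmarks-fabric:...",
	}
	infoKeys := []string{"latencies:", "latencies:hist={test}-proxy", "latencies:hist=benchmarks-fabric"}

	for _, k := range infoKeys {
		// with the PATCH 3/3 fix, each key is parsed from its own payload;
		// reading rawMetrics["latencies:"] here would parse the same payload three times
		fmt.Println(k, "=>", rawMetrics[k])
	}
}
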