diff --git a/watcher_latency.go b/watcher_latency.go
index 10125525..7d0bc7ac 100644
--- a/watcher_latency.go
+++ b/watcher_latency.go
@@ -1,6 +1,8 @@
 package main

 import (
+	"strings"
+
 	"github.com/prometheus/client_golang/prometheus"
 	log "github.com/sirupsen/logrus"
@@ -9,6 +11,8 @@ import (
 type LatencyWatcher struct {
 }

+var LatencyBenchmarks = make(map[string]string)
+
 func (lw *LatencyWatcher) describe(ch chan<- *prometheus.Desc) {}

 func (lw *LatencyWatcher) passOneKeys() []string {
@@ -33,7 +37,7 @@ func (lw *LatencyWatcher) passTwoKeys(rawMetrics map[string]string) (latencyComm
 	}

 	if ok {
-		return []string{"latencies:"}
+		return lw.getLatenciesCommands(rawMetrics)
 	}

 	return []string{"latency:"}
@@ -56,10 +60,25 @@ func (lw *LatencyWatcher) refresh(o *Observer, infoKeys []string, rawMetrics map
 		}
 	}

+	// loop all the latency infokeys
+	for _, infoKey := range infoKeys {
+		err := parseSingleLatenciesKey(infoKey, rawMetrics, allowedLatenciesList, blockedLatenciessList, ch)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func parseSingleLatenciesKey(singleLatencyKey string, rawMetrics map[string]string,
+	allowedLatenciesList map[string]struct{},
+	blockedLatenciessList map[string]struct{}, ch chan<- prometheus.Metric) error {
+
 	var latencyStats map[string]StatsMap

 	if rawMetrics["latencies:"] != "" {
-		latencyStats = parseLatencyInfo(rawMetrics["latencies:"], int(config.Aerospike.LatencyBucketsCount))
+		latencyStats = parseLatencyInfo(rawMetrics[singleLatencyKey], int(config.Aerospike.LatencyBucketsCount))
 	} else {
 		latencyStats = parseLatencyInfoLegacy(rawMetrics["latency:"], int(config.Aerospike.LatencyBucketsCount))
 	}
@@ -99,3 +118,39 @@ func (lw *LatencyWatcher) refresh(o *Observer, infoKeys []string, rawMetrics map

 	return nil
 }
+
+// Utility methods
+// checks if a stat can be considered for latency stat retrieval
+func isStatLatencyHistRelated(stat string) bool {
+	// is not enable-benchmarks-storage and (enable-benchmarks-* or enable-hist-*)
+	return (!strings.Contains(stat, "enable-benchmarks-storage")) && (strings.Contains(stat, "enable-benchmarks-") ||
+		strings.Contains(stat, "enable-hist-")) // hist-proxy & hist-info - both at service level
+}
+
+func (lw *LatencyWatcher) getLatenciesCommands(rawMetrics map[string]string) []string {
+	var commands = []string{"latencies:"}
+
+	// Hashmap content format := namespace-<stat-name> = <stat-name>
+	for latencyHistName := range LatencyBenchmarks {
+		histTokens := strings.Split(latencyHistName, "-")
+
+		histCommand := "latencies:hist="
+
+		// service-enable-benchmarks-fabric or ns-enable-benchmarks-ops-sub or service-enable-hist-info
+		if histTokens[0] != "service" {
+			histCommand = histCommand + "{" + histTokens[0] + "}-"
+		}
+
+		if strings.Contains(latencyHistName, "enable-benchmarks-") {
+			histCommand = histCommand + strings.Join(histTokens[2:], "-")
+		} else {
+			histCommand = histCommand + strings.Join(histTokens[3:], "-")
+		}
+
+		commands = append(commands, histCommand)
+	}
+
+	log.Tracef("latency-passtwokeys:%s", commands)
+
+	return commands
+}
diff --git a/watcher_namespaces.go b/watcher_namespaces.go
index 17133e84..13231c54 100644
--- a/watcher_namespaces.go
+++ b/watcher_namespaces.go
@@ -177,8 +177,6 @@ func (nw *NamespaceWatcher) refreshNamespaceStats(singleInfoKey string, infoKeys
 		sindexType := stats[SINDEX_TYPE]
 		storageEngine := stats[STORAGE_ENGINE]

-		// fmt.Println(" storageEngine: ", storageEngine)
-
 		// if stat is index-type or sindex-type , append addl label
 		if strings.HasPrefix(deviceType, INDEX_TYPE) && len(indexType) > 0 {
 			labels = append(labels, METRIC_LABEL_INDEX)
@@ -210,7 +208,21 @@ func (nw *NamespaceWatcher) refreshNamespaceStats(singleInfoKey string, infoKeys
 			// push to prom-channel
 			pushToPrometheus(asMetric, pv, labels, labelValues, ch)
 		}
+
+		// below code section is to ensure ns+latencies combination is handled during LatencyWatcher
+		//
+		// check and if latency benchmarks stat - is it enabled (bool true==1 and false==0 after conversion)
+		if isStatLatencyHistRelated(stat) {
+			delete(LatencyBenchmarks, nsName+"-"+stat)
+
+			if pv == 1 {
+				LatencyBenchmarks[nsName+"-"+stat] = stat
+			}
+		}
+
 	}
+	// // append default re-repl, as this auto-enabled, but not coming as part of latencies, we need this as namespace is available only here
+	// LatencyBenchmarks[nsName+"-latency-hist-re-repl"] = "{" + nsName + "}-re-repl"
 }
diff --git a/watcher_node_stats.go b/watcher_node_stats.go
index 6ab99b62..ff8fda04 100644
--- a/watcher_node_stats.go
+++ b/watcher_node_stats.go
@@ -75,5 +75,15 @@ func (sw *StatsWatcher) handleRefresh(o *Observer, nodeRawMetrics string, cluste
 		pushToPrometheus(asMetric, pv, labels, labelsValues, ch)

+		// check and if latency benchmarks stat, is it enabled (bool true==1 and false==0 after conversion)
+		if isStatLatencyHistRelated(stat) {
+			// remove old value as microbenchmark may get enabled / disable on-the-fly at server so we cannot rely on value
+			delete(LatencyBenchmarks, "service-"+stat)
+
+			if pv == 1 {
+				LatencyBenchmarks["service-"+stat] = stat
+			}
+		}
+
 	}
 }
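
For reviewers, here is a minimal standalone sketch (not part of the change) of the key-to-command mapping that `getLatenciesCommands` performs once the namespace and node watchers have populated `LatencyBenchmarks`. The map keys used below are hypothetical examples of what those watchers would register.

```go
package main

import (
	"fmt"
	"strings"
)

// buildHistCommand mirrors the per-key mapping inside getLatenciesCommands:
// a LatencyBenchmarks key such as "service-enable-benchmarks-fabric" or
// "test-enable-benchmarks-ops-sub" becomes an info command such as
// "latencies:hist=benchmarks-fabric" or "latencies:hist={test}-benchmarks-ops-sub".
func buildHistCommand(latencyHistName string) string {
	histTokens := strings.Split(latencyHistName, "-")
	histCommand := "latencies:hist="

	// namespace-level stats get a "{<namespace>}-" prefix; service-level stats do not
	if histTokens[0] != "service" {
		histCommand += "{" + histTokens[0] + "}-"
	}

	// drop the "<scope>-enable-" prefix for benchmarks, "<scope>-enable-hist-" for hist stats
	if strings.Contains(latencyHistName, "enable-benchmarks-") {
		histCommand += strings.Join(histTokens[2:], "-")
	} else {
		histCommand += strings.Join(histTokens[3:], "-")
	}

	return histCommand
}

func main() {
	// hypothetical LatencyBenchmarks keys
	for _, key := range []string{
		"service-enable-benchmarks-fabric",
		"service-enable-hist-info",
		"test-enable-benchmarks-ops-sub",
	} {
		fmt.Println(key, "=>", buildHistCommand(key))
	}
}
```

Note that `getLatenciesCommands` always keeps `"latencies:"` as the first entry of the returned slice, so the default latency histograms continue to be fetched alongside any enabled benchmark histograms.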