diff --git a/README.md b/README.md
index 88663a82..c555f51d 100644
--- a/README.md
+++ b/README.md
@@ -18,11 +18,11 @@
:rocket: **qryn** is a _drop-in Grafana compatible_ **polyglot observability** framework
- **Logs, Metrics and Traces** living happily together. Drop-in compatible with multiple vendor formats.
- Native [LogQL/PromQL/TempoQL APIs](https://qryn.cloud) support for [querying](https://github.com/lmangani/qryn/wiki/LogQL-for-Beginners), [processing](https://github.com/lmangani/qryn/wiki/LogQL-Supported-Queries), [tracing](https://github.com/lmangani/qryn/wiki/Tempo-Tracing) and [alerting](https://github.com/lmangani/qryn/wiki/Ruler---Alerts) [^2] in [Grafana](http://docs.grafana.org/features/explore/) [^3]
+- Search, filter and extract metrics from _logs, events, spans and traces_ using familiar languages. _SQL Optional_.
- Ingestion [APIs](https://qryn.metrico.in/#/support) transparently compatible with [Opentelemetry, Loki, Prometheus, InfluxDB, Elastic](https://qryn.dev) _and [more](https://github.com/metrico/otel-collector)_
-- Dynamically search, filter and extract metrics from _logs, events, spans and traces_. _NO SQL required_.
- Ready to use with popular Agents such as [Promtail, Grafana-Agent, Vector, Logstash, Telegraf](https://qryn.metrico.in/#/ingestion) _and more_
- Built in [Explore UI](https://github.com/metrico/cloki-view) and [CLI](https://github.com/lmangani/vLogQL) for querying supported datasources
-- Designed for edge _(js/wasm)_ and core/backend deployments _(go/rust)_.
+- Designed for edge _(js/bun/wasm)_ and core/backend deployments _(golang/rust)_.
- Total data control. Compatible with [ClickHouse](https://clickhouse.com/) or [InfluxDB IOx](https://influxdata.com) with S3 object storage.
:rocket: **qryn.cloud** is the _supercharged_ **qryn** version developed in _go_ with additional _functionality, speed and features!_
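
As a quick check of the Loki-compatible ingestion path listed above, here is a minimal push sketch in Node.js (assumptions: Node 18+ with global fetch, a local qryn instance on its default port 3100; labels and the log line are placeholders):

// Push one log line through the Loki-style push API that qryn accepts.
const payload = {
  streams: [{
    stream: { job: 'demo', level: 'info' },                // stream labels
    values: [[`${Date.now()}000000`, 'hello from qryn']]   // [timestamp in ns, log line]
  }]
}

fetch('http://localhost:3100/loki/api/v1/push', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(payload)
}).then(res => console.log('push status:', res.status))
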
diff --git a/package-lock.json b/package-lock.json
index 4536331a..f846c24e 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "qryn",
- "version": "2.4.2",
+ "version": "2.4.3",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "qryn",
- "version": "2.4.2",
+ "version": "2.4.3",
"hasInstallScript": true,
"license": "AGPL-3.0",
"dependencies": {
diff --git a/package.json b/package.json
index 3bdfb8ac..8ac48b90 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "qryn",
- "version": "2.4.2",
+ "version": "2.4.3",
"description": "Polyglot Observability Stack with ClickHouse storage",
"main": "qryn.js",
"bin": {
diff --git a/parser/registry/smart_optimizations/optimization_v3_2.js b/parser/registry/smart_optimizations/optimization_v3_2.js
index e44670e2..ede56134 100644
--- a/parser/registry/smart_optimizations/optimization_v3_2.js
+++ b/parser/registry/smart_optimizations/optimization_v3_2.js
@@ -1,9 +1,12 @@
-const { getDuration, dist, Aliased } = require('../common')
+const { getDuration, Aliased } = require('../common')
const reg = require('./log_range_agg_reg_v3_2')
const Sql = require('@cloki/clickhouse-sql')
const { DATABASE_NAME, checkVersion } = require('../../../lib/utils')
const streamSelectorReg = require('../stream_selector_operator_registry')
const aggOpReg = require('../high_level_aggregation_registry')
+const { clusterName } = require('../../../common')
+const logger = require('../../../lib/logger')
+const _dist = clusterName ? '_dist' : '' // table suffix used when a ClickHouse cluster is configured
/**
*
@@ -46,14 +49,15 @@ module.exports.apply = (token, fromNS, toNS, stepNS) => {
: Sql.Gt('samples.timestamp_ns', fromNS)
let q = (new Sql.Select())
.select(['samples.fingerprint', 'fingerprint'])
- .from([`${DATABASE_NAME()}.metrics_15s`, 'samples'])
+ .from([`${DATABASE_NAME()}.metrics_15s${_dist}`, 'samples'])
.where(tsClause)
- q.join(new Aliased(`${DATABASE_NAME()}.time_series${dist}`, 'time_series'), 'left any',
+ q.join(new Aliased(`${DATABASE_NAME()}.time_series`, 'time_series'), 'left any',
Sql.Eq('samples.fingerprint', new Sql.Raw('time_series.fingerprint')))
q.select([new Sql.Raw('any(JSONExtractKeysAndValues(time_series.labels, \'String\'))'), 'labels'])
q.ctx = {
- step: stepNS / 1000000000
+ step: stepNS / 1000000000,
+ inline: !!clusterName
}
for (const streamSelectorRule of token.Children('log_stream_selector_rule')) {
@@ -68,5 +72,7 @@ module.exports.apply = (token, fromNS, toNS, stepNS) => {
q = aggOpReg[aggOp.Child('aggregation_operator_fn').value](aggOp, q)
}
+ logger.debug(q.toString())
+
return q
}
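
For context on the change above: when a cluster name is configured, the samples scan is pointed at `metrics_15s_dist` instead of `metrics_15s`; the `_dist` suffix conventionally denotes the ClickHouse Distributed-engine variant that fans reads out across the cluster. A minimal sketch of that selection logic, with illustrative names only (the environment variable and database name are assumptions, not the project's API):

// Sketch: pick the distributed table variant when running against a cluster.
const clusterName = process.env.CLUSTER_NAME || ''         // assumed source of the cluster name
const suffix = clusterName ? '_dist' : ''
const samplesTable = (db) => `${db}.metrics_15s${suffix}`

console.log(samplesTable('qryn'))  // "qryn.metrics_15s_dist" on a cluster, "qryn.metrics_15s" otherwise
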