diff --git a/src/ts_catalog/continuous_aggs_watermark.c b/src/ts_catalog/continuous_aggs_watermark.c
index d7a9becb7b4..c022745f21c 100644
--- a/src/ts_catalog/continuous_aggs_watermark.c
+++ b/src/ts_catalog/continuous_aggs_watermark.c
@@ -17,6 +17,7 @@
 #include 

 #include "debug_point.h"
+#include "guc.h"
 #include "hypertable.h"
 #include "ts_catalog/continuous_agg.h"
 #include "ts_catalog/continuous_aggs_watermark.h"
@@ -321,8 +322,10 @@ ts_cagg_watermark_update(Hypertable *mat_ht, int64 watermark, bool watermark_isn
 
     /* If we have a real-time CAgg, it uses a watermark function. So, we have to invalidate the rel
      * cache to force a replanning of prepared statements. See cagg_watermark_update_internal for
-     * more information. */
-    bool invalidate_rel_cache = !cagg->data.materialized_only;
+     * more information. If the GUC enable_cagg_watermark_constify is false, it is not necessary
+     * to invalidate the relation cache. */
+    bool invalidate_rel_cache =
+        !cagg->data.materialized_only && ts_guc_enable_cagg_watermark_constify;
 
     watermark = cagg_compute_watermark(cagg, watermark, watermark_isnull);
     cagg_watermark_update_internal(mat_ht->fd.id,
diff --git a/tsl/test/expected/jit.out b/tsl/test/expected/jit.out
index e9b1d3bcd48..8ea39bdf390 100644
--- a/tsl/test/expected/jit.out
+++ b/tsl/test/expected/jit.out
@@ -17,6 +17,7 @@ SET jit_above_cost=0;
 SET jit_inline_above_cost=0;
 SET jit_optimize_above_cost=0;
 SET jit_tuple_deforming=on;
+SET enable_hashagg=off;
 \ir :TEST_LOAD_NAME
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
@@ -198,16 +199,19 @@ SELECT * FROM jit_device_summary WHERE metric_spread = 1800 ORDER BY bucket DESC
                Output: _hyper_4_6_chunk.bucket, _hyper_4_6_chunk.device_id, _hyper_4_6_chunk.metric_avg, _hyper_4_6_chunk.metric_spread
                Index Cond: (_hyper_4_6_chunk.bucket < 'Mon Dec 31 01:00:00 2018 PST'::timestamp with time zone)
                Filter: (_hyper_4_6_chunk.metric_spread = '1800'::double precision)
-         ->  HashAggregate
+         ->  GroupAggregate
               Output: (time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time)), _hyper_3_5_chunk.device_id, avg(_hyper_3_5_chunk.metric), (max(_hyper_3_5_chunk.metric) - min(_hyper_3_5_chunk.metric))
-              Group Key: time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time), _hyper_3_5_chunk.device_id
+              Group Key: (time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time)), _hyper_3_5_chunk.device_id
               Filter: ((max(_hyper_3_5_chunk.metric) - min(_hyper_3_5_chunk.metric)) = '1800'::double precision)
-              ->  Result
-                    Output: time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time), _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric
-                    ->  Index Scan using _hyper_3_5_chunk_jit_test_contagg_observation_time_idx on _timescaledb_internal._hyper_3_5_chunk
-                          Output: _hyper_3_5_chunk.observation_time, _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric
-                          Index Cond: (_hyper_3_5_chunk.observation_time >= 'Mon Dec 31 01:00:00 2018 PST'::timestamp with time zone)
-(19 rows)
+              ->  Sort
+                    Output: (time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time)), _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric
+                    Sort Key: (time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time)), _hyper_3_5_chunk.device_id
+                    ->  Result
+                          Output: time_bucket('@ 1 hour'::interval, _hyper_3_5_chunk.observation_time), _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric
+                          ->  Index Scan using _hyper_3_5_chunk_jit_test_contagg_observation_time_idx on _timescaledb_internal._hyper_3_5_chunk
+                                Output: _hyper_3_5_chunk.observation_time, _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric
+                                Index Cond: (_hyper_3_5_chunk.observation_time >= 'Mon Dec 31 01:00:00 2018 PST'::timestamp with time zone)
+(22 rows)
 
 -- generate the results into two different files
 \set ECHO errors
diff --git a/tsl/test/expected/reorder.out b/tsl/test/expected/reorder.out
index 9933a3f61e0..6b7257125ad 100644
--- a/tsl/test/expected/reorder.out
+++ b/tsl/test/expected/reorder.out
@@ -1273,9 +1273,10 @@ CREATE INDEX ct2_time_idx ON ct2(time DESC);
 CLUSTER ct2 USING ct2_time_idx;
 -- deleted chunks are removed correctly
 DELETE FROM ct2 where time < 2 OR val < 2;
+VACUUM ct2;
 SELECT reorder_chunk('_timescaledb_internal._hyper_2_3_chunk', verbose => TRUE);
 INFO:  reordering "_timescaledb_internal._hyper_2_3_chunk" using sequential scan and sort
-INFO:  "_hyper_2_3_chunk": found 2 removable, 3 nonremovable row versions in 1 pages
+INFO:  "_hyper_2_3_chunk": found 0 removable, 3 nonremovable row versions in 1 pages
  reorder_chunk 
 ---------------
  
diff --git a/tsl/test/sql/jit.sql b/tsl/test/sql/jit.sql
index 4f6ade3a9c0..3b57d9c5740 100644
--- a/tsl/test/sql/jit.sql
+++ b/tsl/test/sql/jit.sql
@@ -20,6 +20,7 @@ SET jit_above_cost=0;
 SET jit_inline_above_cost=0;
 SET jit_optimize_above_cost=0;
 SET jit_tuple_deforming=on;
+SET enable_hashagg=off;
 \ir :TEST_LOAD_NAME
 
 \set PREFIX 'EXPLAIN (VERBOSE, TIMING OFF, COSTS OFF, SUMMARY OFF)'
diff --git a/tsl/test/sql/reorder.sql b/tsl/test/sql/reorder.sql
index 768c2b5b0f7..dda69fafc2d 100644
--- a/tsl/test/sql/reorder.sql
+++ b/tsl/test/sql/reorder.sql
@@ -200,6 +200,7 @@
 CLUSTER ct2 USING ct2_time_idx;
 -- deleted chunks are removed correctly
 DELETE FROM ct2 where time < 2 OR val < 2;
+VACUUM ct2;
 SELECT reorder_chunk('_timescaledb_internal._hyper_2_3_chunk', verbose => TRUE);
 
 SELECT ctid, time, val FROM _timescaledb_internal._hyper_2_3_chunk ORDER BY time;
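
For reviewers, a minimal SQL sketch of how the gated invalidation can be exercised. The GUC name below is an
assumption inferred from the C symbol ts_guc_enable_cagg_watermark_constify and the code comment in the patch;
the exact SQL-visible spelling is not confirmed by this diff:

    -- Assumed SQL-level name of the GUC referenced in the patch (hypothetical spelling).
    SET timescaledb.enable_cagg_watermark_constify = off;

    -- With watermark constification disabled, prepared plans do not embed a constified
    -- watermark value, so updating the watermark of a real-time continuous aggregate
    -- no longer needs to invalidate the relation cache in ts_cagg_watermark_update().
    CALL refresh_continuous_aggregate('jit_device_summary', NULL, NULL);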
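
The test-side changes only make the expected output deterministic; they do not change behavior. Both statements
below are taken directly from the diff, with the reasoning spelled out as comments:

    -- jit test: prefer GroupAggregate over HashAggregate so the EXPLAIN output in the
    -- expected .out file has a stable plan shape across PostgreSQL versions and settings.
    SET enable_hashagg = off;

    -- reorder test: remove the dead tuples left by the preceding DELETE so that
    -- reorder_chunk() reports "0 removable" row versions regardless of whether
    -- autovacuum has already run.
    VACUUM ct2;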