From 25d61ffaa2de1344f37249a2ba76cd81ba063c55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Nordstr=C3=B6m?= Date: Wed, 4 Dec 2024 13:17:37 +0100 Subject: [PATCH] Collect relation-level stats during compression During compression, column min/max stats are collected on a per-segment basis for orderby columns and those that have indexes. This change uses the same mechanism to collect relation-level min/max stats to be used by chunk skipping. This avoids, in the worst case, an extra full table scan to gather these chunk column stats. For simplicity, stats gathering is enabled for all columns that can support it, even though a column might use neither segment-level stats nor relation-level (chunk column) stats. The overhead of collecting min/max values should be negligible. --- src/ts_catalog/chunk_column_stats.c | 32 +++-- src/ts_catalog/chunk_column_stats.h | 10 +- tsl/src/compression/api.c | 11 +- tsl/src/compression/compression.c | 104 +++++++++++----- tsl/src/compression/compression.h | 6 +- tsl/src/compression/segment_meta.c | 130 ++++++++++++++------ tsl/src/compression/segment_meta.h | 15 ++- tsl/test/expected/chunk_column_stats-15.out | 2 - tsl/test/expected/chunk_column_stats-16.out | 2 - tsl/test/expected/chunk_column_stats-17.out | 2 - 10 files changed, 229 insertions(+), 85 deletions(-) diff --git a/src/ts_catalog/chunk_column_stats.c b/src/ts_catalog/chunk_column_stats.c index 3aeaa438ea0..9fea92281da 100644 --- a/src/ts_catalog/chunk_column_stats.c +++ b/src/ts_catalog/chunk_column_stats.c @@ -739,7 +739,8 @@ ts_chunk_column_stats_lookup(int32 hypertable_id, int32 chunk_id, const char *co * updated. */ int -ts_chunk_column_stats_calculate(const Hypertable *ht, const Chunk *chunk) +ts_chunk_column_stats_calculate(const Hypertable *ht, const Chunk *chunk, + ChunkColumnStats **statsarray) { Size i = 0; ChunkRangeSpace *rs = ht->range_space; @@ -755,22 +756,38 @@ ts_chunk_column_stats_calculate(const Hypertable *ht, const Chunk *chunk) for (int range_index = 0; range_index < rs->num_range_cols; range_index++) { - Datum minmax[2]; + const Form_chunk_column_stats form = &rs->range_cols[range_index]; + const ChunkColumnStats *colstats = NULL; + ChunkColumnStats computed_stats; + AttrNumber attno; - char *col_name = NameStr(rs->range_cols[range_index].column_name); + const char *col_name = NameStr(form->column_name); Oid col_type; /* Get the attribute number in the HT for this column, and map to the chunk */ + /* TODO: fix unnecessary mapping */ attno = get_attnum(ht->main_table_relid, col_name); attno = ts_map_attno(ht->main_table_relid, chunk->table_id, attno); col_type = get_atttype(ht->main_table_relid, attno); + if (statsarray) + colstats = statsarray[AttrNumberGetAttrOffset(attno)]; + + if (NULL == colstats && ts_chunk_get_minmax(chunk->table_id, + col_type, + attno, + "column range", + computed_stats.minmax)) + { + colstats = &computed_stats; + } + /* calculate the min/max range for this column on this chunk */ - if (ts_chunk_get_minmax(chunk->table_id, col_type, attno, "column range", minmax)) + if (colstats) { Form_chunk_column_stats range; - int64 min = ts_time_value_to_internal(minmax[0], col_type); - int64 max = ts_time_value_to_internal(minmax[1], col_type); + int64 min = ts_time_value_to_internal(colstats->minmax[0], col_type); + int64 max = ts_time_value_to_internal(colstats->minmax[1], col_type); /* The end value is exclusive to the range, so incr by 1 */ if (max != DIMENSION_SLICE_MAXVALUE) @@ -821,7 +838,8 @@ ts_chunk_column_stats_calculate(const Hypertable *ht, const
Chunk *chunk) } } else - ereport(WARNING, errmsg("unable to calculate min/max values for column ranges")); + ereport(WARNING, + errmsg("unable to calculate min/max column range for \"%s\"", col_name)); } MemoryContextSwitchTo(orig_mcxt); diff --git a/src/ts_catalog/chunk_column_stats.h b/src/ts_catalog/chunk_column_stats.h index c7aaaab9be2..a2ed52c6882 100644 --- a/src/ts_catalog/chunk_column_stats.h +++ b/src/ts_catalog/chunk_column_stats.h @@ -23,6 +23,13 @@ typedef struct ChunkRangeSpace FormData_chunk_column_stats range_cols[FLEXIBLE_ARRAY_MEMBER]; } ChunkRangeSpace; +typedef struct ChunkColumnStats +{ + /* Min and max, in that order */ + Datum minmax[2]; + bool isnull[2]; +} ChunkColumnStats; + #define CHUNKRANGESPACE_SIZE(num_columns) \ (sizeof(ChunkRangeSpace) + (sizeof(NameData) * (num_columns))) @@ -35,7 +42,8 @@ extern int ts_chunk_column_stats_update_by_id(int32 chunk_column_stats_id, extern Form_chunk_column_stats ts_chunk_column_stats_lookup(int32 hypertable_id, int32 chunk_id, const char *col_name); -extern TSDLLEXPORT int ts_chunk_column_stats_calculate(const Hypertable *ht, const Chunk *chunk); +extern TSDLLEXPORT int ts_chunk_column_stats_calculate(const Hypertable *ht, const Chunk *chunk, + ChunkColumnStats **statsarray); extern int ts_chunk_column_stats_insert(const Hypertable *ht, const Chunk *chunk); extern void ts_chunk_column_stats_drop(const Hypertable *ht, const char *col_name, bool *dropped); diff --git a/tsl/src/compression/api.c b/tsl/src/compression/api.c index f57460a4875..618ced82fea 100644 --- a/tsl/src/compression/api.c +++ b/tsl/src/compression/api.c @@ -525,10 +525,15 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid) * In the future, we can look at computing min/max entries in the compressed chunk * using the batch metadata and then recompute the range to handle DELETE cases. 
*/ - if (cxt.srcht->range_space) - ts_chunk_column_stats_calculate(cxt.srcht, cxt.srcht_chunk); cstat = compress_chunk(cxt.srcht_chunk->table_id, compress_ht_chunk->table_id, insert_options); + + if (cxt.srcht->range_space && cstat.colstats) + { + ts_chunk_column_stats_calculate(cxt.srcht, cxt.srcht_chunk, cstat.colstats); + pfree(cstat.colstats); + } + after_size = ts_relation_size_impl(compress_ht_chunk->table_id); if (new_compressed_chunk) @@ -1370,7 +1375,7 @@ recompress_chunk_segmentwise_impl(Chunk *uncompressed_chunk) */ Hypertable *ht = ts_hypertable_get_by_id(uncompressed_chunk->fd.hypertable_id); if (ht->range_space) - ts_chunk_column_stats_calculate(ht, uncompressed_chunk); + ts_chunk_column_stats_calculate(ht, uncompressed_chunk, NULL); /*************** tuplesort state *************************/ Tuplesortstate *segment_tuplesortstate; diff --git a/tsl/src/compression/compression.c b/tsl/src/compression/compression.c index c8a239f9ae8..ab2fd5a6a1f 100644 --- a/tsl/src/compression/compression.c +++ b/tsl/src/compression/compression.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -34,6 +35,7 @@ #include "segment_meta.h" #include "ts_catalog/array_utils.h" #include "ts_catalog/catalog.h" +#include "ts_catalog/chunk_column_stats.h" #include "ts_catalog/compression_chunk_size.h" #include "ts_catalog/compression_settings.h" @@ -287,6 +289,7 @@ compress_chunk(Oid in_table, Oid out_table, int insert_options) TupleDesc in_desc = RelationGetDescr(in_rel); TupleDesc out_desc = RelationGetDescr(out_rel); + /* Before calling row compressor relation should be segmented and sorted as configured * by compress_segmentby and compress_orderby. * Cost of sorting can be mitigated if we find an existing BTREE index defined for @@ -494,7 +497,8 @@ compress_chunk(Oid in_table, Oid out_table, int insert_options) tuplesort_end(sorted_rel); } - row_compressor_close(&row_compressor); + cstat.colstats = row_compressor_close(&row_compressor); + if (!ts_guc_enable_delete_after_compression) { DEBUG_WAITPOINT("compression_done_before_truncate_uncompressed"); @@ -720,6 +724,17 @@ build_column_map(CompressionSettings *settings, Relation uncompressed_table, bool is_segmentby = ts_array_is_member(settings->fd.segmentby, NameStr(attr->attname)); bool is_orderby = ts_array_is_member(settings->fd.orderby, NameStr(attr->attname)); + SegmentMetaMinMaxBuilder *segment_min_max_builder = NULL; + TypeCacheEntry *type = lookup_type_cache(attr->atttypid, TYPECACHE_LT_OPR); + + if (OidIsValid(type->lt_opr)) + { + /* Always run the min-max builder if the type allows. It is + * useful to collect, e.g., column stats for chunk skipping. 
*/ + segment_min_max_builder = + segment_meta_min_max_builder_create(attr->atttypid, attr->attcollation); + } + if (!is_segmentby) { if (compressed_column_attr->atttypid != compressed_data_type_oid) @@ -742,18 +757,6 @@ build_column_map(CompressionSettings *settings, Relation uncompressed_table, int16 segment_min_attr_offset = segment_min_attr_number - 1; int16 segment_max_attr_offset = segment_max_attr_number - 1; - SegmentMetaMinMaxBuilder *segment_min_max_builder = NULL; - if (segment_min_attr_number != InvalidAttrNumber || - segment_max_attr_number != InvalidAttrNumber) - { - Ensure(segment_min_attr_number != InvalidAttrNumber, - "could not find the min metadata column"); - Ensure(segment_max_attr_number != InvalidAttrNumber, - "could not find the min metadata column"); - segment_min_max_builder = - segment_meta_min_max_builder_create(attr->atttypid, attr->attcollation); - } - Ensure(!is_orderby || segment_min_max_builder != NULL, "orderby columns must have minmax metadata"); @@ -777,6 +780,7 @@ build_column_map(CompressionSettings *settings, Relation uncompressed_table, .segmentby_column_index = index, .min_metadata_attr_offset = -1, .max_metadata_attr_offset = -1, + .min_max_metadata_builder = segment_min_max_builder, }; } } @@ -965,7 +969,9 @@ row_compressor_append_row(RowCompressor *row_compressor, TupleTableSlot *row) bool is_null; Datum val; - /* if there is no compressor, this must be a segmenter, so just skip */ + /* if there is no compressor, this must be a segmenter, so just + * skip. Note that, for segmentby columns, min/max stats are updated + * per segment (on flush) instead of per row. */ if (compressor == NULL) continue; @@ -1024,11 +1030,9 @@ row_compressor_flush(RowCompressor *row_compressor, CommandId mycid, bool change row_compressor->compressed_values[compressed_col] = PointerGetDatum(compressed_data); - if (column->min_max_metadata_builder != NULL) + if (column->min_max_metadata_builder != NULL && column->min_metadata_attr_offset >= 0 && + column->max_metadata_attr_offset >= 0) { - Assert(column->min_metadata_attr_offset >= 0); - Assert(column->max_metadata_attr_offset >= 0); - if (!segment_meta_min_max_builder_empty(column->min_max_metadata_builder)) { Assert(compressed_data != NULL); @@ -1050,6 +1054,17 @@ row_compressor_flush(RowCompressor *row_compressor, CommandId mycid, bool change } else if (column->segment_info != NULL) { + /* Update min/max for segmentby column. It is done here on flush + * instead of per row since the value is the same for every row + * in the segment. */ + if (column->min_max_metadata_builder != NULL) + { + if (column->segment_info->is_null) + segment_meta_min_max_builder_update_null(column->min_max_metadata_builder); + else + segment_meta_min_max_builder_update_val(column->min_max_metadata_builder, + column->segment_info->val); + } row_compressor->compressed_values[compressed_col] = column->segment_info->val; row_compressor->compressed_is_null[compressed_col] = column->segment_info->is_null; } @@ -1091,23 +1106,31 @@ row_compressor_flush(RowCompressor *row_compressor, CommandId mycid, bool change /* don't free the segment-bys if we've overflowed the row, we still need them */ if (column->segment_info != NULL && !changed_groups) + { + /* Still need to reset the min/max builder to save per-column + * min/max based on per-segment min/max.
*/ + segment_meta_min_max_builder_reset(column->min_max_metadata_builder); continue; + } if (column->compressor != NULL || !column->segment_info->typ_by_val) pfree(DatumGetPointer(row_compressor->compressed_values[compressed_col])); if (column->min_max_metadata_builder != NULL) { - /* segment_meta_min_max_builder_reset will free the values, so clear here */ - if (!row_compressor->compressed_is_null[column->min_metadata_attr_offset]) + /* segment_meta_min_max_builder_reset will free the values, so clear here */ + if (column->min_metadata_attr_offset >= 0 && column->max_metadata_attr_offset >= 0) { - row_compressor->compressed_values[column->min_metadata_attr_offset] = 0; - row_compressor->compressed_is_null[column->min_metadata_attr_offset] = true; - } - if (!row_compressor->compressed_is_null[column->max_metadata_attr_offset]) - { - row_compressor->compressed_values[column->max_metadata_attr_offset] = 0; - row_compressor->compressed_is_null[column->max_metadata_attr_offset] = true; + if (!row_compressor->compressed_is_null[column->min_metadata_attr_offset]) + { + row_compressor->compressed_values[column->min_metadata_attr_offset] = 0; + row_compressor->compressed_is_null[column->min_metadata_attr_offset] = true; + } + if (!row_compressor->compressed_is_null[column->max_metadata_attr_offset]) + { + row_compressor->compressed_values[column->max_metadata_attr_offset] = 0; + row_compressor->compressed_is_null[column->max_metadata_attr_offset] = true; + } } segment_meta_min_max_builder_reset(column->min_max_metadata_builder); } @@ -1133,12 +1156,37 @@ row_compressor_reset(RowCompressor *row_compressor) row_compressor->first_iteration = true; } -void +ChunkColumnStats ** row_compressor_close(RowCompressor *row_compressor) { if (row_compressor->bistate) FreeBulkInsertState(row_compressor->bistate); CatalogCloseIndexes(row_compressor->resultRelInfo); + + ChunkColumnStats **colstats = + palloc(sizeof(ChunkColumnStats *) * row_compressor->n_input_columns); + + /* Get any relation-level stats (min and max) collected during compression + * and return them to the caller */ + for (int i = 0; i < row_compressor->n_input_columns; i++) + { + const PerColumn *column = &row_compressor->per_column[i]; + SegmentMetaMinMaxBuilder *builder = column->min_max_metadata_builder; + + if (builder && segment_meta_has_relation_stats(builder)) + { + ChunkColumnStats *colstat = palloc(sizeof(ChunkColumnStats)); + colstat->minmax[0] = segment_meta_min_max_builder_relation_min(builder); + colstat->minmax[1] = segment_meta_min_max_builder_relation_max(builder); + colstats[i] = colstat; + } + else + { + colstats[i] = NULL; + } + } + + return colstats; } /****************** diff --git a/tsl/src/compression/compression.h b/tsl/src/compression/compression.h index 0ede01bdf0e..faeb8fb08a3 100644 --- a/tsl/src/compression/compression.h +++ b/tsl/src/compression/compression.h @@ -11,6 +11,7 @@ #include #include #include +#include #include typedef struct BulkInsertStateData *BulkInsertState; @@ -200,11 +201,14 @@ typedef enum CompressionAlgorithm _MAX_NUM_COMPRESSION_ALGORITHMS = 128, } CompressionAlgorithm; +typedef struct ChunkColumnStats ChunkColumnStats; + typedef struct CompressionStats { int64 rowcnt_pre_compression; int64 rowcnt_post_compression; int64 rowcnt_frozen; + ChunkColumnStats **colstats; } CompressionStats; typedef struct PerColumn @@ -368,7 +372,7 @@ extern void row_compressor_init(CompressionSettings *settings, RowCompressor *ro int16 num_columns_in_compressed_table, bool need_bistate, int insert_options); extern void
row_compressor_reset(RowCompressor *row_compressor); -extern void row_compressor_close(RowCompressor *row_compressor); +extern struct ChunkColumnStats **row_compressor_close(RowCompressor *row_compressor); extern void row_compressor_append_sorted_rows(RowCompressor *row_compressor, Tuplesortstate *sorted_rel, TupleDesc sorted_desc, Relation in_rel); diff --git a/tsl/src/compression/segment_meta.c b/tsl/src/compression/segment_meta.c index f51fceb3145..030f4144e48 100644 --- a/tsl/src/compression/segment_meta.c +++ b/tsl/src/compression/segment_meta.c @@ -11,6 +11,7 @@ #include #include +#include "debug_assert.h" #include "segment_meta.h" SegmentMetaMinMaxBuilder * @@ -42,34 +43,37 @@ segment_meta_min_max_builder_create(Oid type_oid, Oid collation) return builder; } -void -segment_meta_min_max_builder_update_val(SegmentMetaMinMaxBuilder *builder, Datum val) +static Datum +compare_values(SegmentMetaMinMaxBuilder *builder, Datum old_val, Datum val, bool is_min) { int cmp; - if (builder->empty) - { - builder->min = datumCopy(val, builder->type_by_val, builder->type_len); - builder->max = datumCopy(val, builder->type_by_val, builder->type_len); - builder->empty = false; - return; - } + cmp = ApplySortComparator(old_val, false, val, false, &builder->ssup); - cmp = ApplySortComparator(builder->min, false, val, false, &builder->ssup); - if (cmp > 0) + if ((is_min && cmp > 0) || (!is_min && cmp < 0)) { if (!builder->type_by_val) - pfree(DatumGetPointer(builder->min)); - builder->min = datumCopy(val, builder->type_by_val, builder->type_len); + pfree(DatumGetPointer(old_val)); + + return datumCopy(val, builder->type_by_val, builder->type_len); } - cmp = ApplySortComparator(builder->max, false, val, false, &builder->ssup); - if (cmp < 0) + return old_val; +} + +void +segment_meta_min_max_builder_update_val(SegmentMetaMinMaxBuilder *builder, Datum val) +{ + if (builder->empty) { - if (!builder->type_by_val) - pfree(DatumGetPointer(builder->max)); + builder->min = datumCopy(val, builder->type_by_val, builder->type_len); builder->max = datumCopy(val, builder->type_by_val, builder->type_len); + builder->empty = false; + return; } + + builder->min = compare_values(builder, builder->min, val, true); + builder->max = compare_values(builder, builder->max, val, false); } void @@ -83,6 +87,28 @@ segment_meta_min_max_builder_reset(SegmentMetaMinMaxBuilder *builder) { if (!builder->empty) { + /* Update the relation min and max. Those values need to live on a + * memory context that has the same lifetime as the builder itself. 
*/ + MemoryContext oldcxt = MemoryContextSwitchTo(builder->ssup.ssup_cxt); + + if (!builder->has_relation_stats) + { + builder->relation_min = + datumCopy(builder->min, builder->type_by_val, builder->type_len); + builder->relation_max = + datumCopy(builder->max, builder->type_by_val, builder->type_len); + builder->has_relation_stats = true; + } + else + { + builder->relation_min = + compare_values(builder, builder->relation_min, builder->min, true); + builder->relation_max = + compare_values(builder, builder->relation_max, builder->max, false); + } + + MemoryContextSwitchTo(oldcxt); + if (!builder->type_by_val) { pfree(DatumGetPointer(builder->min)); @@ -90,43 +116,71 @@ segment_meta_min_max_builder_reset(SegmentMetaMinMaxBuilder *builder) } builder->min = 0; builder->max = 0; + builder->empty = true; } - builder->empty = true; + builder->has_null = false; } -Datum -segment_meta_min_max_builder_min(SegmentMetaMinMaxBuilder *builder) +static Datum +get_unpacked_value(SegmentMetaMinMaxBuilder *builder, Datum *value, bool valid_condition, + const char *valuetype) { - if (builder->empty) - elog(ERROR, "trying to get min from an empty builder"); + Datum unpacked = *value; + + Ensure(valid_condition, "no data for %s stats", valuetype); + if (builder->type_len == -1) { - Datum unpacked = PointerGetDatum(PG_DETOAST_DATUM_PACKED(builder->min)); - if (builder->min != unpacked) - pfree(DatumGetPointer(builder->min)); - builder->min = unpacked; + unpacked = PointerGetDatum(PG_DETOAST_DATUM_PACKED(*value)); + + if (*value != unpacked) + pfree(DatumGetPointer(*value)); + + *value = unpacked; } - return builder->min; + + return unpacked; +} + +Datum +segment_meta_min_max_builder_min(SegmentMetaMinMaxBuilder *builder) +{ + return get_unpacked_value(builder, &builder->min, !builder->empty, "min"); } Datum segment_meta_min_max_builder_max(SegmentMetaMinMaxBuilder *builder) { - if (builder->empty) - elog(ERROR, "trying to get max from an empty builder"); - if (builder->type_len == -1) - { - Datum unpacked = PointerGetDatum(PG_DETOAST_DATUM_PACKED(builder->max)); - if (builder->max != unpacked) - pfree(DatumGetPointer(builder->max)); - builder->max = unpacked; - } - return builder->max; + return get_unpacked_value(builder, &builder->max, !builder->empty, "max"); +} + +Datum +segment_meta_min_max_builder_relation_min(SegmentMetaMinMaxBuilder *builder) +{ + return get_unpacked_value(builder, + &builder->relation_min, + builder->has_relation_stats, + "relation min"); +} + +Datum +segment_meta_min_max_builder_relation_max(SegmentMetaMinMaxBuilder *builder) +{ + return get_unpacked_value(builder, + &builder->relation_max, + builder->has_relation_stats, + "relation max"); } bool -segment_meta_min_max_builder_empty(SegmentMetaMinMaxBuilder *builder) +segment_meta_min_max_builder_empty(const SegmentMetaMinMaxBuilder *builder) { return builder->empty; } + +bool +segment_meta_has_relation_stats(const SegmentMetaMinMaxBuilder *builder) +{ + return builder->has_relation_stats; +} diff --git a/tsl/src/compression/segment_meta.h b/tsl/src/compression/segment_meta.h index 2b8876dd4ed..4d95103cb43 100644 --- a/tsl/src/compression/segment_meta.h +++ b/tsl/src/compression/segment_meta.h @@ -19,8 +19,16 @@ typedef struct SegmentMetaMinMaxBuilder SortSupportData ssup; bool type_by_val; int16 type_len; + + /* Per-segment min and max */ Datum min; Datum max; + + /* Per-relation min and max. Updated on every segment processed (builder + * reset). 
*/ + Datum relation_min; + Datum relation_max; + bool has_relation_stats; } SegmentMetaMinMaxBuilder; typedef struct SegmentMetaMinMaxBuilder SegmentMetaMinMaxBuilder; @@ -31,6 +39,11 @@ void segment_meta_min_max_builder_update_null(SegmentMetaMinMaxBuilder *builder) Datum segment_meta_min_max_builder_min(SegmentMetaMinMaxBuilder *builder); Datum segment_meta_min_max_builder_max(SegmentMetaMinMaxBuilder *builder); -bool segment_meta_min_max_builder_empty(SegmentMetaMinMaxBuilder *builder); + +Datum segment_meta_min_max_builder_relation_min(SegmentMetaMinMaxBuilder *builder); +Datum segment_meta_min_max_builder_relation_max(SegmentMetaMinMaxBuilder *builder); void segment_meta_min_max_builder_reset(SegmentMetaMinMaxBuilder *builder); + +bool segment_meta_min_max_builder_empty(const SegmentMetaMinMaxBuilder *builder); +bool segment_meta_has_relation_stats(const SegmentMetaMinMaxBuilder *builder); diff --git a/tsl/test/expected/chunk_column_stats-15.out b/tsl/test/expected/chunk_column_stats-15.out index 5a647d16df8..a854cdce3bd 100644 --- a/tsl/test/expected/chunk_column_stats-15.out +++ b/tsl/test/expected/chunk_column_stats-15.out @@ -633,7 +633,6 @@ SELECT * from _timescaledb_catalog.chunk_column_stats; -- Compressing a chunk again should calculate proper ranges SELECT compress_chunk(:'CH_NAME'); -WARNING: no index on "sensor_id" found for column range on chunk "_hyper_1_1_chunk" compress_chunk ---------------------------------------- _timescaledb_internal._hyper_1_1_chunk @@ -666,7 +665,6 @@ SELECT * from _timescaledb_catalog.chunk_column_stats; -- Check that truncate resets the entry in the catalog SELECT compress_chunk(:'CH_NAME'); -WARNING: no index on "sensor_id" found for column range on chunk "_hyper_1_1_chunk" compress_chunk ---------------------------------------- _timescaledb_internal._hyper_1_1_chunk diff --git a/tsl/test/expected/chunk_column_stats-16.out b/tsl/test/expected/chunk_column_stats-16.out index 5a647d16df8..a854cdce3bd 100644 --- a/tsl/test/expected/chunk_column_stats-16.out +++ b/tsl/test/expected/chunk_column_stats-16.out @@ -633,7 +633,6 @@ SELECT * from _timescaledb_catalog.chunk_column_stats; -- Compressing a chunk again should calculate proper ranges SELECT compress_chunk(:'CH_NAME'); -WARNING: no index on "sensor_id" found for column range on chunk "_hyper_1_1_chunk" compress_chunk ---------------------------------------- _timescaledb_internal._hyper_1_1_chunk @@ -666,7 +665,6 @@ SELECT * from _timescaledb_catalog.chunk_column_stats; -- Check that truncate resets the entry in the catalog SELECT compress_chunk(:'CH_NAME'); -WARNING: no index on "sensor_id" found for column range on chunk "_hyper_1_1_chunk" compress_chunk ---------------------------------------- _timescaledb_internal._hyper_1_1_chunk diff --git a/tsl/test/expected/chunk_column_stats-17.out b/tsl/test/expected/chunk_column_stats-17.out index 5a647d16df8..a854cdce3bd 100644 --- a/tsl/test/expected/chunk_column_stats-17.out +++ b/tsl/test/expected/chunk_column_stats-17.out @@ -633,7 +633,6 @@ SELECT * from _timescaledb_catalog.chunk_column_stats; -- Compressing a chunk again should calculate proper ranges SELECT compress_chunk(:'CH_NAME'); -WARNING: no index on "sensor_id" found for column range on chunk "_hyper_1_1_chunk" compress_chunk ---------------------------------------- _timescaledb_internal._hyper_1_1_chunk @@ -666,7 +665,6 @@ SELECT * from _timescaledb_catalog.chunk_column_stats; -- Check that truncate resets the entry in the catalog SELECT compress_chunk(:'CH_NAME'); -WARNING: no
index on "sensor_id" found for column range on chunk "_hyper_1_1_chunk" compress_chunk ---------------------------------------- _timescaledb_internal._hyper_1_1_chunk