From 3d63a0e288814719861a5c8396b6e4fa88670d21 Mon Sep 17 00:00:00 2001
From: Jan Nidzwetzki
Date: Fri, 21 Jul 2023 08:10:05 +0200
Subject: [PATCH] Reduce WAL activity by freezing tuples immediately

When we compress a chunk, we create a new compressed chunk for storing the
compressed data. So far, the tuples were just inserted into the compressed
chunk and frozen by a later vacuum run. However, freezing tuples causes WAL
activity that can be avoided because the compressed chunk is created in the
same transaction as the tuples. This patch reduces the WAL activity by
storing these tuples directly in a frozen state, which prevents a later
freeze operation. This approach is similar to PostgreSQL's COPY FREEZE.
---
 .unreleased/feature_5890                      |    1 +
 sql/pre_install/tables.sql                    |    1 +
 sql/updates/latest-dev.sql                    |   50 +
 sql/updates/reverse-dev.sql                   |   50 +
 src/telemetry/stats.c                         |    1 +
 src/telemetry/stats.h                         |    1 +
 src/telemetry/telemetry.c                     |    4 +
 src/ts_catalog/catalog.h                      |    2 +
 tsl/src/chunk_copy.c                          |    1 +
 tsl/src/compression/api.c                     |   41 +-
 tsl/src/compression/compression.c             |   20 +-
 tsl/src/compression/compression.h             |    7 +-
 .../{bgw_custom.out => bgw_custom-13.out}     |    0
 tsl/test/expected/bgw_custom-14.out           | 1072 +++++++++++++++++
 tsl/test/expected/bgw_custom-15.out           | 1072 +++++++++++++++++
 tsl/test/expected/bgw_custom-16.out           | 1072 +++++++++++++++++
 tsl/test/expected/compression.out             |   66 +-
 ...ression_bgw.out => compression_bgw-13.out} |    0
 tsl/test/expected/compression_bgw-14.out      |  657 ++++++++++
 tsl/test/expected/compression_bgw-15.out      |  657 ++++++++++
 tsl/test/expected/compression_bgw-16.out      |  657 ++++++++++
 tsl/test/expected/telemetry_stats-13.out      |  748 ++++++++++++
 tsl/test/expected/telemetry_stats-14.out      |  748 ++++++++++++
 tsl/test/expected/telemetry_stats-15.out      |  748 ++++++++++++
 tsl/test/expected/telemetry_stats-16.out      |  748 ++++++++++++
 tsl/test/expected/telemetry_stats.out         |  736 ----------
 .../isolation/expected/compression_freeze.out |  145 +++
 tsl/test/isolation/specs/CMakeLists.txt       |    2 +-
 .../isolation/specs/compression_freeze.spec   |   68 ++
 tsl/test/sql/.gitignore                       |    3 +
 tsl/test/sql/CMakeLists.txt                   |   10 +-
 .../sql/{bgw_custom.sql => bgw_custom.sql.in} |    0
 ...ression_bgw.sql => compression_bgw.sql.in} |    0
 ...metry_stats.sql => telemetry_stats.sql.in} |    0
 tsl/test/src/test_compression.c               |    3 +-
 35 files changed, 8604 insertions(+), 787 deletions(-)
 create mode 100644 .unreleased/feature_5890
 rename tsl/test/expected/{bgw_custom.out => bgw_custom-13.out} (100%)
 create mode 100644 tsl/test/expected/bgw_custom-14.out
 create mode 100644 tsl/test/expected/bgw_custom-15.out
 create mode 100644 tsl/test/expected/bgw_custom-16.out
 rename tsl/test/expected/{compression_bgw.out => compression_bgw-13.out} (100%)
 create mode 100644 tsl/test/expected/compression_bgw-14.out
 create mode 100644 tsl/test/expected/compression_bgw-15.out
 create mode 100644 tsl/test/expected/compression_bgw-16.out
 create mode 100644 tsl/test/expected/telemetry_stats-13.out
 create mode 100644 tsl/test/expected/telemetry_stats-14.out
 create mode 100644 tsl/test/expected/telemetry_stats-15.out
 create mode 100644 tsl/test/expected/telemetry_stats-16.out
 delete mode 100644 tsl/test/expected/telemetry_stats.out
 create mode 100644 tsl/test/isolation/expected/compression_freeze.out
 create mode 100644 tsl/test/isolation/specs/compression_freeze.spec
 rename tsl/test/sql/{bgw_custom.sql => bgw_custom.sql.in} (100%)
 rename tsl/test/sql/{compression_bgw.sql => compression_bgw.sql.in} (100%)
 rename tsl/test/sql/{telemetry_stats.sql => telemetry_stats.sql.in}
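As a minimal sketch of how the new behavior can be observed on a build that includes this patch: compress a chunk, then read the new catalog column added below. The hypertable name, schema, and sample data are illustrative assumptions; only compress_chunk(), show_chunks(), and the _timescaledb_catalog.compression_chunk_size column introduced by this patch come from TimescaleDB itself.

-- Illustrative setup: table name and data are arbitrary.
CREATE TABLE metrics (time timestamptz NOT NULL, device int, value float8);
SELECT create_hypertable('metrics', 'time');
ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_segmentby = 'device');
INSERT INTO metrics
  SELECT t, 1, random()
  FROM generate_series('2023-01-01'::timestamptz, '2023-01-02', '1 minute') AS t;

-- Compressing the chunk creates the compressed chunk and inserts its tuples
-- in the same transaction, so the tuples can be written with HEAP_INSERT_FROZEN.
SELECT compress_chunk(c) FROM show_chunks('metrics') AS c;

-- The new column reports how many compressed rows were frozen at insert time;
-- it stays 0 for paths that reuse an existing compressed chunk (e.g. recompression).
SELECT chunk_id, numrows_post_compression, numrows_frozen_immediately
  FROM _timescaledb_catalog.compression_chunk_size;

Because HEAP_INSERT_FROZEN makes tuples immediately visible to all transactions, the patch only sets it when the compressed chunk is created in the same transaction as the inserted tuples; the recompression code paths therefore pass 0 as the insert options.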
(100%) diff --git a/.unreleased/feature_5890 b/.unreleased/feature_5890 new file mode 100644 index 00000000000..b8ac85886dc --- /dev/null +++ b/.unreleased/feature_5890 @@ -0,0 +1 @@ +Implements: #5890 Reduce WAL activity by freezing compressed tuples immediately diff --git a/sql/pre_install/tables.sql b/sql/pre_install/tables.sql index 5a2d36d0a11..85d45f1728f 100644 --- a/sql/pre_install/tables.sql +++ b/sql/pre_install/tables.sql @@ -495,6 +495,7 @@ CREATE TABLE _timescaledb_catalog.compression_chunk_size ( compressed_index_size bigint NOT NULL, numrows_pre_compression bigint, numrows_post_compression bigint, + numrows_frozen_immediately bigint, -- table constraints CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id), CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE, diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql index b8d310f00f3..90775394522 100644 --- a/sql/updates/latest-dev.sql +++ b/sql/updates/latest-dev.sql @@ -179,3 +179,53 @@ DROP TABLE _timescaledb_internal.tmp_chunk_seq_value; GRANT SELECT ON _timescaledb_catalog.chunk_id_seq TO PUBLIC; GRANT SELECT ON _timescaledb_catalog.chunk TO PUBLIC; -- end recreate _timescaledb_catalog.chunk table -- + +-- +-- Rebuild the catalog table `_timescaledb_catalog.compression_chunk_size` to +-- add new column `numrows_frozen_immediately` +-- +CREATE TABLE _timescaledb_internal.compression_chunk_size_tmp + AS SELECT * from _timescaledb_catalog.compression_chunk_size; + +-- Drop depended views +-- We assume that '_timescaledb_internal.compressed_chunk_stats' was already dropped in this update +-- (see above) + +-- Drop table +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_chunk_size; +DROP TABLE _timescaledb_catalog.compression_chunk_size; + +CREATE TABLE _timescaledb_catalog.compression_chunk_size ( + chunk_id integer NOT NULL, + compressed_chunk_id integer NOT NULL, + uncompressed_heap_size bigint NOT NULL, + uncompressed_toast_size bigint NOT NULL, + uncompressed_index_size bigint NOT NULL, + compressed_heap_size bigint NOT NULL, + compressed_toast_size bigint NOT NULL, + compressed_index_size bigint NOT NULL, + numrows_pre_compression bigint, + numrows_post_compression bigint, + numrows_frozen_immediately bigint, + -- table constraints + CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id), + CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE, + CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE +); + +INSERT INTO _timescaledb_catalog.compression_chunk_size +(chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, + uncompressed_index_size, compressed_heap_size, compressed_toast_size, + compressed_index_size, numrows_pre_compression, numrows_post_compression, numrows_frozen_immediately) +SELECT chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, + uncompressed_index_size, compressed_heap_size, compressed_toast_size, + compressed_index_size, numrows_pre_compression, numrows_post_compression, 0 +FROM _timescaledb_internal.compression_chunk_size_tmp; + +DROP TABLE _timescaledb_internal.compression_chunk_size_tmp; + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', ''); + +GRANT SELECT ON 
_timescaledb_catalog.compression_chunk_size TO PUBLIC; + +-- End modify `_timescaledb_catalog.compression_chunk_size` diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index 688fbe20eb2..b1c25e80c6a 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -124,3 +124,53 @@ GRANT SELECT ON _timescaledb_catalog.chunk_id_seq TO PUBLIC; GRANT SELECT ON _timescaledb_catalog.chunk TO PUBLIC; -- end recreate _timescaledb_catalog.chunk table -- + + +-- +-- Rebuild the catalog table `_timescaledb_catalog.compression_chunk_size` to +-- remove column `numrows_frozen_immediately` +-- +CREATE TABLE _timescaledb_internal.compression_chunk_size_tmp + AS SELECT * from _timescaledb_catalog.compression_chunk_size; + +-- Drop depended views +-- We assume that '_timescaledb_internal.compressed_chunk_stats' was already dropped in this update +-- (see above) + +-- Drop table +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_chunk_size; +DROP TABLE _timescaledb_catalog.compression_chunk_size; + +CREATE TABLE _timescaledb_catalog.compression_chunk_size ( + chunk_id integer NOT NULL, + compressed_chunk_id integer NOT NULL, + uncompressed_heap_size bigint NOT NULL, + uncompressed_toast_size bigint NOT NULL, + uncompressed_index_size bigint NOT NULL, + compressed_heap_size bigint NOT NULL, + compressed_toast_size bigint NOT NULL, + compressed_index_size bigint NOT NULL, + numrows_pre_compression bigint, + numrows_post_compression bigint, + -- table constraints + CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id), + CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE, + CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE +); + +INSERT INTO _timescaledb_catalog.compression_chunk_size +(chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, + uncompressed_index_size, compressed_heap_size, compressed_toast_size, + compressed_index_size, numrows_pre_compression, numrows_post_compression) +SELECT chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, + uncompressed_index_size, compressed_heap_size, compressed_toast_size, + compressed_index_size, numrows_pre_compression, numrows_post_compression +FROM _timescaledb_internal.compression_chunk_size_tmp; + +DROP TABLE _timescaledb_internal.compression_chunk_size_tmp; + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', ''); + +GRANT SELECT ON _timescaledb_catalog.compression_chunk_size TO PUBLIC; + +-- End modify `_timescaledb_catalog.compression_chunk_size` diff --git a/src/telemetry/stats.c b/src/telemetry/stats.c index f6b3932c6f0..316e735bcd8 100644 --- a/src/telemetry/stats.c +++ b/src/telemetry/stats.c @@ -335,6 +335,7 @@ add_chunk_stats(HyperStats *stats, Form_pg_class class, const Chunk *chunk, stats->uncompressed_toast_size += fd_compr->uncompressed_toast_size; stats->uncompressed_row_count += fd_compr->numrows_pre_compression; stats->compressed_row_count += fd_compr->numrows_post_compression; + stats->compressed_row_frozen_immediately_count += fd_compr->numrows_frozen_immediately; /* Also add compressed sizes to total number for entire table */ stats->storage.relsize.heap_size += fd_compr->compressed_heap_size; diff --git a/src/telemetry/stats.h b/src/telemetry/stats.h index d765e906ad0..777779181ec 100644 --- 
a/src/telemetry/stats.h +++ b/src/telemetry/stats.h @@ -64,6 +64,7 @@ typedef struct HyperStats int64 compressed_indexes_size; int64 compressed_toast_size; int64 compressed_row_count; + int64 compressed_row_frozen_immediately_count; int64 uncompressed_heap_size; int64 uncompressed_indexes_size; int64 uncompressed_toast_size; diff --git a/src/telemetry/telemetry.c b/src/telemetry/telemetry.c index 8b0c099674a..16d270ab784 100644 --- a/src/telemetry/telemetry.c +++ b/src/telemetry/telemetry.c @@ -604,6 +604,7 @@ format_iso8601(Datum value) #define REQ_RELKIND_COMPRESSED_TOAST_SIZE "compressed_toast_size" #define REQ_RELKIND_COMPRESSED_INDEXES_SIZE "compressed_indexes_size" #define REQ_RELKIND_COMPRESSED_ROWCOUNT "compressed_row_count" +#define REQ_RELKIND_COMPRESSED_ROWCOUNT_FROZEN_IMMEDIATELY "compressed_row_count_frozen_immediately" #define REQ_RELKIND_CAGG_ON_DISTRIBUTED_HYPERTABLE_COUNT "num_caggs_on_distributed_hypertables" #define REQ_RELKIND_CAGG_USES_REAL_TIME_AGGREGATION_COUNT "num_caggs_using_real_time_aggregation" @@ -639,6 +640,9 @@ add_compression_stats_object(JsonbParseState *parse_state, StatsRelType reltype, ts_jsonb_add_int64(parse_state, REQ_RELKIND_COMPRESSED_INDEXES_SIZE, hs->compressed_indexes_size); + ts_jsonb_add_int64(parse_state, + REQ_RELKIND_COMPRESSED_ROWCOUNT_FROZEN_IMMEDIATELY, + hs->compressed_row_frozen_immediately_count); ts_jsonb_add_int64(parse_state, REQ_RELKIND_UNCOMPRESSED_ROWCOUNT, hs->uncompressed_row_count); ts_jsonb_add_int64(parse_state, REQ_RELKIND_UNCOMPRESSED_HEAP_SIZE, hs->uncompressed_heap_size); ts_jsonb_add_int64(parse_state, diff --git a/src/ts_catalog/catalog.h b/src/ts_catalog/catalog.h index c7c683e22b8..aa91278579b 100644 --- a/src/ts_catalog/catalog.h +++ b/src/ts_catalog/catalog.h @@ -1289,6 +1289,7 @@ typedef enum Anum_compression_chunk_size Anum_compression_chunk_size_compressed_index_size, Anum_compression_chunk_size_numrows_pre_compression, Anum_compression_chunk_size_numrows_post_compression, + Anum_compression_chunk_size_numrows_frozen_immediately, _Anum_compression_chunk_size_max, } Anum_compression_chunk_size; @@ -1306,6 +1307,7 @@ typedef struct FormData_compression_chunk_size int64 compressed_index_size; int64 numrows_pre_compression; int64 numrows_post_compression; + int64 numrows_frozen_immediately; } FormData_compression_chunk_size; typedef FormData_compression_chunk_size *Form_compression_chunk_size; diff --git a/tsl/src/chunk_copy.c b/tsl/src/chunk_copy.c index ae0ffcdee0e..b81107347ae 100644 --- a/tsl/src/chunk_copy.c +++ b/tsl/src/chunk_copy.c @@ -549,6 +549,7 @@ chunk_copy_get_source_compressed_chunk_stats(ChunkCopy *cc) cc->fd_ccs.compressed_index_size = atoll(PQgetvalue(res, 0, 5)); cc->fd_ccs.numrows_pre_compression = atoll(PQgetvalue(res, 0, 6)); cc->fd_ccs.numrows_post_compression = atoll(PQgetvalue(res, 0, 7)); + cc->fd_ccs.numrows_frozen_immediately = 0; ts_dist_cmd_close_response(dist_res); } diff --git a/tsl/src/compression/api.c b/tsl/src/compression/api.c index 3113fa27518..0914d08de18 100644 --- a/tsl/src/compression/api.c +++ b/tsl/src/compression/api.c @@ -58,7 +58,8 @@ typedef struct CompressChunkCxt static void compression_chunk_size_catalog_insert(int32 src_chunk_id, const RelationSize *src_size, int32 compress_chunk_id, const RelationSize *compress_size, - int64 rowcnt_pre_compression, int64 rowcnt_post_compression) + int64 rowcnt_pre_compression, int64 rowcnt_post_compression, + int64 rowcnt_frozen) { Catalog *catalog = ts_catalog_get(); Relation rel; @@ -93,6 +94,8 @@ 
compression_chunk_size_catalog_insert(int32 src_chunk_id, const RelationSize *sr Int64GetDatum(rowcnt_pre_compression); values[AttrNumberGetAttrOffset(Anum_compression_chunk_size_numrows_post_compression)] = Int64GetDatum(rowcnt_post_compression); + values[AttrNumberGetAttrOffset(Anum_compression_chunk_size_numrows_frozen_immediately)] = + Int64GetDatum(rowcnt_frozen); ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx); ts_catalog_insert_values(rel, desc, values, nulls); @@ -487,6 +490,27 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid) compress_ht_chunk = ts_chunk_get_by_id(mergable_chunk->fd.compressed_chunk_id, true); result_chunk_id = mergable_chunk->table_id; } + + /* Since the compressed relation is created in the same transaction as the tuples that will be + * written by the compressor, we can insert the tuple directly in frozen state. This is the same + * logic as performed in COPY INSERT FROZEN. + * + * Note: Tuples inserted with HEAP_INSERT_FROZEN become immediately visible to all transactions + * (they violate the MVCC pattern). So, this flag can only be used when creating the compressed + * chunk in the same transaction as the compressed tuples are inserted. + * + * If this isn't the case, then tuples can be seen multiple times by parallel readers - once in + * the uncompressed part of the hypertable (since they are not deleted in the transaction) and + * once in the compressed part of the hypertable since the MVCC semantic is violated due to the + * flag. + * + * In contrast, when the compressed chunk part is created in the same transaction as the tuples + * are written, the compressed chunk (i.e., the catalog entry) becomes visible to other + * transactions only after the transaction that performs the compression is commited and + * the uncompressed chunk is truncated. + */ + int insert_options = new_compressed_chunk ? HEAP_INSERT_FROZEN : 0; + /* convert list to array of pointers for compress_chunk */ colinfo_array = palloc(sizeof(ColumnCompressionInfo *) * htcols_listlen); foreach (lc, htcols_list) @@ -498,7 +522,8 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid) cstat = compress_chunk(cxt.srcht_chunk->table_id, compress_ht_chunk->table_id, colinfo_array, - htcols_listlen); + htcols_listlen, + insert_options); /* Drop all FK constraints on the uncompressed chunk. This is needed to allow * cascading deleted data in FK-referenced tables, while blocking deleting data @@ -514,7 +539,8 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid) compress_ht_chunk->fd.id, &after_size, cstat.rowcnt_pre_compression, - cstat.rowcnt_post_compression); + cstat.rowcnt_post_compression, + cstat.rowcnt_frozen); /* Copy chunk constraints (including fkey) to compressed chunk. * Do this after compressing the chunk to avoid holding strong, unnecessary locks on the @@ -811,7 +837,8 @@ tsl_create_compressed_chunk(PG_FUNCTION_ARGS) compress_ht_chunk->fd.id, &compressed_size, numrows_pre_compression, - numrows_post_compression); + numrows_post_compression, + 0); chunk_was_compressed = ts_chunk_is_compressed(cxt.srcht_chunk); ts_chunk_set_compressed_chunk(cxt.srcht_chunk, compress_ht_chunk->fd.id); @@ -1071,7 +1098,8 @@ tsl_get_compressed_chunk_index_for_recompression(PG_FUNCTION_ARGS) in_column_offsets, compressed_rel_tupdesc->natts, true /*need_bistate*/, - true /*reset_sequence*/); + true /*reset_sequence*/, + 0 /*insert options*/); /* * Keep the ExclusiveLock on the compressed chunk. 
This lock will be requested @@ -1370,7 +1398,8 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS) in_column_offsets, compressed_rel_tupdesc->natts, true /*need_bistate*/, - true /*reset_sequence*/); + true /*reset_sequence*/, + 0 /*insert options*/); /* create an array of the segmentby column offsets in the compressed chunk */ int16 *segmentby_column_offsets_compressed = diff --git a/tsl/src/compression/compression.c b/tsl/src/compression/compression.c index 8c8836d9904..c786cc6cbcc 100644 --- a/tsl/src/compression/compression.c +++ b/tsl/src/compression/compression.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -53,6 +54,7 @@ #include "create.h" #include "custom_type_cache.h" #include "arrow_c_data_interface.h" +#include "debug_assert.h" #include "debug_point.h" #include "deltadelta.h" #include "dictionary.h" @@ -223,7 +225,7 @@ truncate_relation(Oid table_oid) CompressionStats compress_chunk(Oid in_table, Oid out_table, const ColumnCompressionInfo **column_compression_info, - int num_compression_infos) + int num_compression_infos, int insert_options) { int n_keys; ListCell *lc; @@ -399,7 +401,8 @@ compress_chunk(Oid in_table, Oid out_table, const ColumnCompressionInfo **column in_column_offsets, out_desc->natts, true /*need_bistate*/, - false /*reset_sequence*/); + false /*reset_sequence*/, + insert_options); if (matched_index_rel != NULL) { @@ -441,12 +444,19 @@ compress_chunk(Oid in_table, Oid out_table, const ColumnCompressionInfo **column } row_compressor_finish(&row_compressor); + DEBUG_WAITPOINT("compression_done_before_truncate_uncompressed"); truncate_relation(in_table); table_close(out_rel, NoLock); table_close(in_rel, NoLock); cstat.rowcnt_pre_compression = row_compressor.rowcnt_pre_compression; cstat.rowcnt_post_compression = row_compressor.num_compressed_rows; + + if ((insert_options & HEAP_INSERT_FROZEN) == HEAP_INSERT_FROZEN) + cstat.rowcnt_frozen = row_compressor.num_compressed_rows; + else + cstat.rowcnt_frozen = 0; + return cstat; } @@ -836,7 +846,8 @@ void row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_desc, Relation compressed_table, int num_compression_infos, const ColumnCompressionInfo **column_compression_info, int16 *in_column_offsets, - int16 num_columns_in_compressed_table, bool need_bistate, bool reset_sequence) + int16 num_columns_in_compressed_table, bool need_bistate, bool reset_sequence, + int insert_options) { TupleDesc out_desc = RelationGetDescr(compressed_table); int col; @@ -883,6 +894,7 @@ row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_ .sequence_num = SEQUENCE_NUM_GAP, .reset_sequence = reset_sequence, .first_iteration = true, + .insert_options = insert_options, }; memset(row_compressor->compressed_is_null, 1, sizeof(bool) * num_columns_in_compressed_table); @@ -1214,7 +1226,7 @@ row_compressor_flush(RowCompressor *row_compressor, CommandId mycid, bool change heap_insert(row_compressor->compressed_table, compressed_tuple, mycid, - 0 /*=options*/, + row_compressor->insert_options /*=options*/, row_compressor->bistate); if (row_compressor->resultRelInfo->ri_NumIndices > 0) { diff --git a/tsl/src/compression/compression.h b/tsl/src/compression/compression.h index db850d40583..1ab39075484 100644 --- a/tsl/src/compression/compression.h +++ b/tsl/src/compression/compression.h @@ -201,6 +201,7 @@ typedef struct CompressionStats { int64 rowcnt_pre_compression; int64 rowcnt_post_compression; + int64 rowcnt_frozen; } CompressionStats; typedef struct 
PerColumn @@ -264,6 +265,8 @@ typedef struct RowCompressor bool reset_sequence; /* flag for checking if we are working on the first tuple */ bool first_iteration; + /* the heap insert options */ + int insert_options; } RowCompressor; /* SegmentFilter is used for filtering segments based on qualifiers */ @@ -312,7 +315,7 @@ pg_attribute_unused() assert_num_compression_algorithms_sane(void) extern CompressionStorage compression_get_toast_storage(CompressionAlgorithms algo); extern CompressionStats compress_chunk(Oid in_table, Oid out_table, const ColumnCompressionInfo **column_compression_info, - int num_compression_infos); + int num_compression_infos, int insert_options); extern void decompress_chunk(Oid in_table, Oid out_table); extern DecompressionIterator *(*tsl_get_decompression_iterator_init( @@ -354,7 +357,7 @@ extern void row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompr Relation compressed_table, int num_compression_infos, const ColumnCompressionInfo **column_compression_info, int16 *column_offsets, int16 num_columns_in_compressed_table, - bool need_bistate, bool reset_sequence); + bool need_bistate, bool reset_sequence, int insert_options); extern void row_compressor_finish(RowCompressor *row_compressor); extern void populate_per_compressed_columns_from_data(PerCompressedColumn *per_compressed_cols, int16 num_cols, Datum *compressed_datums, diff --git a/tsl/test/expected/bgw_custom.out b/tsl/test/expected/bgw_custom-13.out similarity index 100% rename from tsl/test/expected/bgw_custom.out rename to tsl/test/expected/bgw_custom-13.out diff --git a/tsl/test/expected/bgw_custom-14.out b/tsl/test/expected/bgw_custom-14.out new file mode 100644 index 00000000000..6f308e7d361 --- /dev/null +++ b/tsl/test/expected/bgw_custom-14.out @@ -0,0 +1,1072 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE TABLE custom_log(job_id int, args jsonb, extra text, runner NAME DEFAULT CURRENT_ROLE); +CREATE OR REPLACE FUNCTION custom_func(jobid int, args jsonb) RETURNS VOID LANGUAGE SQL AS +$$ + INSERT INTO custom_log VALUES($1, $2, 'custom_func'); +$$; +CREATE OR REPLACE FUNCTION custom_func_definer(jobid int, args jsonb) RETURNS VOID LANGUAGE SQL AS +$$ + INSERT INTO custom_log VALUES($1, $2, 'security definer'); +$$ SECURITY DEFINER; +CREATE OR REPLACE PROCEDURE custom_proc(job_id int, args jsonb) LANGUAGE SQL AS +$$ + INSERT INTO custom_log VALUES($1, $2, 'custom_proc'); +$$; +-- procedure with transaction handling +CREATE OR REPLACE PROCEDURE custom_proc2(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + INSERT INTO custom_log VALUES($1, $2, 'custom_proc2 1 COMMIT ' || (args->>'type')); + COMMIT; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc2 2 ROLLBACK ' || (args->>'type')); + ROLLBACK; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc2 3 COMMIT ' || (args->>'type')); + COMMIT; +END +$$; +\set ON_ERROR_STOP 0 +-- test bad input +SELECT add_job(NULL, '1h'); +ERROR: function or procedure cannot be NULL +SELECT add_job(0, '1h'); +ERROR: function or procedure with OID 0 does not exist +-- this will return an error about Oid 4294967295 +-- while regproc is unsigned int postgres has an implicit cast from int to regproc +SELECT add_job(-1, '1h'); +ERROR: function or procedure with OID 4294967295 does not exist +SELECT add_job('invalid_func', '1h'); +ERROR: function "invalid_func" does not exist at character 16 +SELECT add_job('custom_func', NULL); +ERROR: schedule interval cannot be NULL +SELECT add_job('custom_func', 'invalid interval'); +ERROR: invalid input syntax for type interval: "invalid interval" at character 31 +\set ON_ERROR_STOP 1 +select '2000-01-01 00:00:00+00' as time_zero \gset +SELECT add_job('custom_func','1h', config:='{"type":"function"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1000 +(1 row) + +SELECT add_job('custom_proc','1h', config:='{"type":"procedure"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1001 +(1 row) + +SELECT add_job('custom_proc2','1h', config:= '{"type":"procedure"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1002 +(1 row) + +SELECT add_job('custom_func', '1h', config:='{"type":"function"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1003 +(1 row) + +SELECT add_job('custom_func_definer', '1h', config:='{"type":"function"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1004 +(1 row) + +-- exclude the telemetry[1] and job error retention[2] jobs +-- job 2 may have already run which will set its next_start field thus making the test flaky +SELECT * FROM timescaledb_information.jobs WHERE job_id NOT IN (1,2) ORDER BY 1; + job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name +--------+----------------------------+-------------------+-------------+-------------+--------------+-------------+---------------------+-------------------+-----------+----------------+-----------------------+------------------------------+------------------------------+-------------------+-----------------+--------------+------------ + 1000 | User-Defined Action [1000] | @ 1 hour | @ 0 | -1 | @ 5 mins | public 
| custom_func | default_perm_user | t | t | {"type": "function"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1001 | User-Defined Action [1001] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_proc | default_perm_user | t | t | {"type": "procedure"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1002 | User-Defined Action [1002] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_proc2 | default_perm_user | t | t | {"type": "procedure"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1003 | User-Defined Action [1003] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_func | default_perm_user | t | t | {"type": "function"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1004 | User-Defined Action [1004] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_func_definer | default_perm_user | t | t | {"type": "function"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | +(5 rows) + +SELECT count(*) FROM _timescaledb_config.bgw_job WHERE config->>'type' IN ('procedure', 'function'); + count +------- + 5 +(1 row) + +\set ON_ERROR_STOP 0 +-- test bad input +CALL run_job(NULL); +ERROR: job ID cannot be NULL +CALL run_job(-1); +ERROR: job -1 not found +\set ON_ERROR_STOP 1 +CALL run_job(1000); +CALL run_job(1001); +CALL run_job(1002); +CALL run_job(1003); +CALL run_job(1004); +SELECT * FROM custom_log ORDER BY job_id, extra; + job_id | args | extra | runner +--------+-----------------------+---------------------------------+------------------- + 1000 | {"type": "function"} | custom_func | default_perm_user + 1001 | {"type": "procedure"} | custom_proc | default_perm_user + 1002 | {"type": "procedure"} | custom_proc2 1 COMMIT procedure | default_perm_user + 1002 | {"type": "procedure"} | custom_proc2 3 COMMIT procedure | default_perm_user + 1003 | {"type": "function"} | custom_func | default_perm_user + 1004 | {"type": "function"} | security definer | default_perm_user +(6 rows) + +\set ON_ERROR_STOP 0 +-- test bad input +SELECT delete_job(NULL); + delete_job +------------ + +(1 row) + +SELECT delete_job(-1); +ERROR: job -1 not found +\set ON_ERROR_STOP 1 +-- We keep job 1000 for some additional checks. 
+SELECT delete_job(1001); + delete_job +------------ + +(1 row) + +SELECT delete_job(1002); + delete_job +------------ + +(1 row) + +SELECT delete_job(1003); + delete_job +------------ + +(1 row) + +SELECT delete_job(1004); + delete_job +------------ + +(1 row) + +-- check jobs got removed +SELECT count(*) FROM timescaledb_information.jobs WHERE job_id >= 1001; + count +------- + 0 +(1 row) + +\c :TEST_DBNAME :ROLE_SUPERUSER +\set ON_ERROR_STOP 0 +-- test bad input +SELECT alter_job(NULL, if_exists => false); +ERROR: job ID cannot be NULL +SELECT alter_job(-1, if_exists => false); +ERROR: job -1 not found +\set ON_ERROR_STOP 1 +-- test bad input but don't fail +SELECT alter_job(NULL, if_exists => true); +NOTICE: job 0 not found, skipping + alter_job +----------- + +(1 row) + +SELECT alter_job(-1, if_exists => true); +NOTICE: job -1 not found, skipping + alter_job +----------- + +(1 row) + +-- test altering job with NULL config +SELECT job_id FROM alter_job(1000,scheduled:=false); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+---------------------- + f | {"type": "function"} +(1 row) + +-- test updating job settings +SELECT job_id FROM alter_job(1000,config:='{"test":"test"}'); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+------------------ + f | {"test": "test"} +(1 row) + +SELECT job_id FROM alter_job(1000,scheduled:=true); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+------------------ + t | {"test": "test"} +(1 row) + +SELECT job_id FROM alter_job(1000,scheduled:=false); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+------------------ + f | {"test": "test"} +(1 row) + +-- Done with job 1000 now, so remove it. 
+SELECT delete_job(1000); + delete_job +------------ + +(1 row) + +--test for #2793 +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +-- background workers are disabled, so the job will not run -- +SELECT add_job( proc=>'custom_func', + schedule_interval=>'1h', initial_start =>'2018-01-01 10:00:00-05') AS job_id_1 \gset +SELECT job_id, next_start, scheduled, schedule_interval +FROM timescaledb_information.jobs WHERE job_id > 1000; + job_id | next_start | scheduled | schedule_interval +--------+------------------------------+-----------+------------------- + 1005 | Mon Jan 01 07:00:00 2018 PST | t | @ 1 hour +(1 row) + +\x +SELECT * FROM timescaledb_information.job_stats WHERE job_id > 1000; +-[ RECORD 1 ]----------+----------------------------- +hypertable_schema | +hypertable_name | +job_id | 1005 +last_run_started_at | -infinity +last_successful_finish | -infinity +last_run_status | +job_status | Scheduled +last_run_duration | +next_start | Mon Jan 01 07:00:00 2018 PST +total_runs | 0 +total_successes | 0 +total_failures | 0 + +\x +SELECT delete_job(:job_id_1); + delete_job +------------ + +(1 row) + +-- tests for #3545 +CREATE FUNCTION wait_for_job_to_run(job_param_id INTEGER, expected_runs INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + r RECORD; +BEGIN + FOR i in 1..spins + LOOP + SELECT total_successes, total_failures FROM _timescaledb_internal.bgw_job_stat WHERE job_id=job_param_id INTO r; + IF (r.total_failures > 0) THEN + RAISE INFO 'wait_for_job_to_run: job execution failed'; + RETURN false; + ELSEIF (r.total_successes = expected_runs) THEN + RETURN true; + ELSEIF (r.total_successes > expected_runs) THEN + RAISE 'num_runs > expected'; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RAISE INFO 'wait_for_job_to_run: timeout after % tries', spins; + RETURN false; +END +$BODY$; +TRUNCATE custom_log; +-- Nested procedure call +CREATE OR REPLACE PROCEDURE custom_proc_nested(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + INSERT INTO custom_log VALUES($1, $2, 'custom_proc_nested 1 COMMIT'); + COMMIT; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc_nested 2 ROLLBACK'); + ROLLBACK; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc_nested 3 COMMIT'); + COMMIT; +END +$$; +CREATE OR REPLACE PROCEDURE custom_proc3(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + CALL custom_proc_nested(job_id, args); +END +$$; +CREATE OR REPLACE PROCEDURE custom_proc4(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + INSERT INTO custom_log VALUES($1, $2, 'custom_proc4 1 COMMIT'); + COMMIT; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc4 2 ROLLBACK'); + ROLLBACK; + RAISE EXCEPTION 'forced exception'; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc4 3 ABORT'); + COMMIT; +END +$$; +CREATE OR REPLACE PROCEDURE custom_proc5(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + CALL refresh_continuous_aggregate('conditions_summary_daily', '2021-08-01 00:00', '2021-08-31 00:00'); +END +$$; +-- Remove any default jobs, e.g., telemetry +\c :TEST_DBNAME :ROLE_SUPERUSER +TRUNCATE _timescaledb_config.bgw_job RESTART IDENTITY CASCADE; +NOTICE: truncate cascades to table "bgw_job_stat" +NOTICE: truncate cascades to table "bgw_policy_chunk_stats" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT add_job('custom_proc2', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_1 \gset +SELECT add_job('custom_proc3', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_2 \gset +\c 
:TEST_DBNAME :ROLE_SUPERUSER +-- Start Background Workers +SELECT _timescaledb_functions.start_background_workers(); + start_background_workers +-------------------------- + t +(1 row) + +-- Wait for jobs +SELECT wait_for_job_to_run(:job_id_1, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT wait_for_job_to_run(:job_id_2, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +-- Check results +SELECT * FROM custom_log ORDER BY job_id, extra; + job_id | args | extra | runner +--------+-----------------------+---------------------------------+------------------- + 1000 | {"type": "procedure"} | custom_proc2 1 COMMIT procedure | default_perm_user + 1000 | {"type": "procedure"} | custom_proc2 3 COMMIT procedure | default_perm_user + 1001 | {"type": "procedure"} | custom_proc_nested 1 COMMIT | default_perm_user + 1001 | {"type": "procedure"} | custom_proc_nested 3 COMMIT | default_perm_user +(4 rows) + +-- Delete previous jobs +SELECT delete_job(:job_id_1); + delete_job +------------ + +(1 row) + +SELECT delete_job(:job_id_2); + delete_job +------------ + +(1 row) + +TRUNCATE custom_log; +-- Forced Exception +SELECT add_job('custom_proc4', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_3 \gset +SELECT wait_for_job_to_run(:job_id_3, 1); +INFO: wait_for_job_to_run: job execution failed + wait_for_job_to_run +--------------------- + f +(1 row) + +-- Check results +SELECT * FROM custom_log ORDER BY job_id, extra; + job_id | args | extra | runner +--------+-----------------------+-----------------------+------------ + 1002 | {"type": "procedure"} | custom_proc4 1 COMMIT | super_user +(1 row) + +-- Delete previous jobs +SELECT delete_job(:job_id_3); + delete_job +------------ + +(1 row) + +CREATE TABLE conditions ( + time TIMESTAMP NOT NULL, + location TEXT NOT NULL, + location2 char(10) NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL +) WITH (autovacuum_enabled = FALSE); +SELECT create_hypertable('conditions', 'time', chunk_time_interval := '15 days'::interval); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_hypertable +------------------------- + (1,public,conditions,t) +(1 row) + +ALTER TABLE conditions + SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'location', + timescaledb.compress_orderby = 'time' +); +INSERT INTO conditions +SELECT generate_series('2021-08-01 00:00'::timestamp, '2021-08-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +-- Chunk compress stats +SELECT * FROM _timescaledb_internal.compressed_chunk_stats ORDER BY chunk_name; + hypertable_schema | hypertable_name | chunk_schema | chunk_name | compression_status | uncompressed_heap_size | uncompressed_index_size | uncompressed_toast_size | uncompressed_total_size | compressed_heap_size | compressed_index_size | compressed_toast_size | compressed_total_size +-------------------+-----------------+-----------------------+------------------+--------------------+------------------------+-------------------------+-------------------------+-------------------------+----------------------+-----------------------+-----------------------+----------------------- + public | conditions | _timescaledb_internal | _hyper_1_1_chunk | Uncompressed | | | | | | | | + public | conditions | _timescaledb_internal | _hyper_1_2_chunk | Uncompressed | | | | | | | | + public | conditions | _timescaledb_internal | _hyper_1_3_chunk | Uncompressed | | | | | | | | +(3 rows) + 
+-- Compression policy +SELECT add_compression_policy('conditions', interval '1 day') AS job_id_4 \gset +SELECT wait_for_job_to_run(:job_id_4, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +-- Chunk compress stats +SELECT * FROM _timescaledb_internal.compressed_chunk_stats ORDER BY chunk_name; + hypertable_schema | hypertable_name | chunk_schema | chunk_name | compression_status | uncompressed_heap_size | uncompressed_index_size | uncompressed_toast_size | uncompressed_total_size | compressed_heap_size | compressed_index_size | compressed_toast_size | compressed_total_size +-------------------+-----------------+-----------------------+------------------+--------------------+------------------------+-------------------------+-------------------------+-------------------------+----------------------+-----------------------+-----------------------+----------------------- + public | conditions | _timescaledb_internal | _hyper_1_1_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 16384 | 16384 | 8192 | 40960 + public | conditions | _timescaledb_internal | _hyper_1_2_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 16384 | 16384 | 8192 | 40960 + public | conditions | _timescaledb_internal | _hyper_1_3_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 16384 | 16384 | 8192 | 40960 +(3 rows) + +--TEST compression job after inserting data into previously compressed chunk +INSERT INTO conditions +SELECT generate_series('2021-08-01 00:00'::timestamp, '2021-08-31 00:00'::timestamp, '1 day'), 'NYC', 'nycity', 40, 40; +SELECT id, table_name, status from _timescaledb_catalog.chunk +where hypertable_id = (select id from _timescaledb_catalog.hypertable + where table_name = 'conditions') +order by id; + id | table_name | status +----+------------------+-------- + 1 | _hyper_1_1_chunk | 9 + 2 | _hyper_1_2_chunk | 9 + 3 | _hyper_1_3_chunk | 9 +(3 rows) + +--running job second time, wait for it to complete +select t.schedule_interval FROM alter_job(:job_id_4, next_start=> now() ) t; + schedule_interval +------------------- + @ 12 hours +(1 row) + +SELECT wait_for_job_to_run(:job_id_4, 2); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT id, table_name, status from _timescaledb_catalog.chunk +where hypertable_id = (select id from _timescaledb_catalog.hypertable + where table_name = 'conditions') +order by id; + id | table_name | status +----+------------------+-------- + 1 | _hyper_1_1_chunk | 1 + 2 | _hyper_1_2_chunk | 1 + 3 | _hyper_1_3_chunk | 1 +(3 rows) + +-- Drop the compression job +SELECT delete_job(:job_id_4); + delete_job +------------ + +(1 row) + +-- Decompress chunks before create the cagg +SELECT decompress_chunk(c) FROM show_chunks('conditions') c; + decompress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk +(3 rows) + +-- TEST Continuous Aggregate job +CREATE MATERIALIZED VIEW conditions_summary_daily +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature), + MAX(temperature), + MIN(temperature) +FROM conditions +GROUP BY location, bucket +WITH NO DATA; +-- Refresh Continous Aggregate by Job +SELECT add_job('custom_proc5', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_5 \gset +SELECT wait_for_job_to_run(:job_id_5, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT count(*) FROM 
conditions_summary_daily; + count +------- + 62 +(1 row) + +-- TESTs for alter_job_set_hypertable_id API +SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id_5, NULL); + alter_job_set_hypertable_id +----------------------------- + 1004 +(1 row) + +SELECT id, proc_name, hypertable_id +FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; + id | proc_name | hypertable_id +------+--------------+--------------- + 1004 | custom_proc5 | +(1 row) + +-- error case, try to associate with a PG relation +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id_5, 'custom_log'); +ERROR: relation "custom_log" is not a hypertable or continuous aggregate +\set ON_ERROR_STOP 1 +-- TEST associate the cagg with the job +SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id_5, 'conditions_summary_daily'::regclass); + alter_job_set_hypertable_id +----------------------------- + 1004 +(1 row) + +SELECT id, proc_name, hypertable_id +FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; + id | proc_name | hypertable_id +------+--------------+--------------- + 1004 | custom_proc5 | 3 +(1 row) + +--verify that job is dropped when cagg is dropped +DROP MATERIALIZED VIEW conditions_summary_daily; +NOTICE: drop cascades to table _timescaledb_internal._hyper_3_10_chunk +SELECT id, proc_name, hypertable_id +FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; + id | proc_name | hypertable_id +----+-----------+--------------- +(0 rows) + +-- Cleanup +DROP TABLE conditions; +DROP TABLE custom_log; +-- Stop Background Workers +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +SELECT _timescaledb_functions.restart_background_workers(); + restart_background_workers +---------------------------- + t +(1 row) + +\set ON_ERROR_STOP 0 +-- add test for custom jobs with custom check functions +-- create the functions/procedures to be used as checking functions +CREATE OR REPLACE PROCEDURE test_config_check_proc(config jsonb) +LANGUAGE PLPGSQL +AS $$ +DECLARE + drop_after interval; +BEGIN + SELECT jsonb_object_field_text (config, 'drop_after')::interval INTO STRICT drop_after; + IF drop_after IS NULL THEN + RAISE EXCEPTION 'Config must be not NULL and have drop_after'; + END IF ; +END +$$; +CREATE OR REPLACE FUNCTION test_config_check_func(config jsonb) RETURNS VOID +AS $$ +DECLARE + drop_after interval; +BEGIN + IF config IS NULL THEN + RETURN; + END IF; + SELECT jsonb_object_field_text (config, 'drop_after')::interval INTO STRICT drop_after; + IF drop_after IS NULL THEN + RAISE EXCEPTION 'Config can be NULL but must have drop_after if not'; + END IF ; +END +$$ LANGUAGE PLPGSQL; +-- step 2, create a procedure to run as a custom job +CREATE OR REPLACE PROCEDURE test_proc_with_check(job_id int, config jsonb) +LANGUAGE PLPGSQL +AS $$ +BEGIN + RAISE NOTICE 'Will only print this if config passes checks, my config is %', config; +END +$$; +-- step 3, add the job with the config check function passed as argument +-- test procedures, should get an unsupported error +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_proc'::regproc); +ERROR: unsupported function type +-- test functions +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func'::regproc); +ERROR: Config can be NULL but must have drop_after if not +select add_job('test_proc_with_check', '5 secs', config => NULL, check_config => 
'test_config_check_func'::regproc); + add_job +--------- + 1005 +(1 row) + +select add_job('test_proc_with_check', '5 secs', config => '{"drop_after": "chicken"}', check_config => 'test_config_check_func'::regproc); +ERROR: invalid input syntax for type interval: "chicken" +select add_job('test_proc_with_check', '5 secs', config => '{"drop_after": "2 weeks"}', check_config => 'test_config_check_func'::regproc) +as job_with_func_check_id \gset +--- test alter_job +select alter_job(:job_with_func_check_id, config => '{"drop_after":"chicken"}'); +ERROR: invalid input syntax for type interval: "chicken" +select config from alter_job(:job_with_func_check_id, config => '{"drop_after":"5 years"}'); + config +--------------------------- + {"drop_after": "5 years"} +(1 row) + +-- test that jobs with an incorrect check function signature will not be registered +-- these are all incorrect function signatures +CREATE OR REPLACE FUNCTION test_config_check_func_0args() RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'I take no arguments and will validate anything you give me!'; +END +$$ LANGUAGE PLPGSQL; +CREATE OR REPLACE FUNCTION test_config_check_func_2args(config jsonb, intarg int) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'I take two arguments (jsonb, int) and I should fail to run!'; +END +$$ LANGUAGE PLPGSQL; +CREATE OR REPLACE FUNCTION test_config_check_func_intarg(config int) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'I take one argument which is an integer and I should fail to run!'; +END +$$ LANGUAGE PLPGSQL; +-- -- this should fail, it has an incorrect check function +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_0args'::regproc); +ERROR: function or procedure public.test_config_check_func_0args(config jsonb) not found +-- -- so should this +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_2args'::regproc); +ERROR: function or procedure public.test_config_check_func_2args(config jsonb) not found +-- and this +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_intarg'::regproc); +ERROR: function or procedure public.test_config_check_func_intarg(config jsonb) not found +-- and this fails as it calls a nonexistent function +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_nonexistent_check_func'::regproc); +ERROR: function "test_nonexistent_check_func" does not exist at character 82 +-- when called with a valid check function and a NULL config no check should occur +CREATE OR REPLACE FUNCTION test_config_check_func(config jsonb) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'This message will get printed for both NULL and not NULL config'; +END +$$ LANGUAGE PLPGSQL; +SET client_min_messages = NOTICE; +-- check done for both NULL and non-NULL config +select add_job('test_proc_with_check', '5 secs', config => NULL, check_config => 'test_config_check_func'::regproc); +NOTICE: This message will get printed for both NULL and not NULL config + add_job +--------- + 1007 +(1 row) + +-- check done +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func'::regproc) as job_id \gset +NOTICE: This message will get printed for both NULL and not NULL config +-- check function not returning void +CREATE OR REPLACE FUNCTION test_config_check_func_returns_int(config jsonb) RETURNS INT +AS $$ +BEGIN + raise notice 'I print a message, and then I return least(1,2)'; + RETURN 
LEAST(1, 2); +END +$$ LANGUAGE PLPGSQL; +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_returns_int'::regproc, +initial_start => :'time_zero'::timestamptz) as job_id_int \gset +NOTICE: I print a message, and then I return least(1,2) +-- drop the registered check function, verify that alter_job will work and print a warning that +-- the check is being skipped due to the check function missing +ALTER FUNCTION test_config_check_func RENAME TO renamed_func; +select job_id, schedule_interval, config, check_config from alter_job(:job_id, schedule_interval => '1 hour'); +WARNING: function public.test_config_check_func(config jsonb) not found, skipping config validation for job 1008 + job_id | schedule_interval | config | check_config +--------+-------------------+--------+------------------------------- + 1008 | @ 1 hour | {} | public.test_config_check_func +(1 row) + +DROP FUNCTION test_config_check_func_returns_int; +select job_id, schedule_interval, config, check_config from alter_job(:job_id_int, config => '{"field":"value"}'); +WARNING: function public.test_config_check_func_returns_int(config jsonb) not found, skipping config validation for job 1009 + job_id | schedule_interval | config | check_config +--------+-------------------+--------------------+------------------------------------------- + 1009 | @ 5 secs | {"field": "value"} | public.test_config_check_func_returns_int +(1 row) + +-- rename the check function and then call alter_job to register the new name +select job_id, schedule_interval, config, check_config from alter_job(:job_id, check_config => 'renamed_func'::regproc); +NOTICE: This message will get printed for both NULL and not NULL config + job_id | schedule_interval | config | check_config +--------+-------------------+--------+--------------------- + 1008 | @ 1 hour | {} | public.renamed_func +(1 row) + +-- run alter again, should get a config check +select job_id, schedule_interval, config, check_config from alter_job(:job_id, config => '{}'); +NOTICE: This message will get printed for both NULL and not NULL config + job_id | schedule_interval | config | check_config +--------+-------------------+--------+--------------------- + 1008 | @ 1 hour | {} | public.renamed_func +(1 row) + +-- do not drop the current check function but register a new one +CREATE OR REPLACE FUNCTION substitute_check_func(config jsonb) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'This message is a substitute of the previously printed one'; +END +$$ LANGUAGE PLPGSQL; +-- register the new check +select job_id, schedule_interval, config, check_config from alter_job(:job_id, check_config => 'substitute_check_func'); +NOTICE: This message is a substitute of the previously printed one + job_id | schedule_interval | config | check_config +--------+-------------------+--------+------------------------------ + 1008 | @ 1 hour | {} | public.substitute_check_func +(1 row) + +select job_id, schedule_interval, config, check_config from alter_job(:job_id, config => '{}'); +NOTICE: This message is a substitute of the previously printed one + job_id | schedule_interval | config | check_config +--------+-------------------+--------+------------------------------ + 1008 | @ 1 hour | {} | public.substitute_check_func +(1 row) + +RESET client_min_messages; +-- test an oid that doesn't exist +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 17424217::regproc); +ERROR: function with OID 17424217 does not exist +\c :TEST_DBNAME 
:ROLE_SUPERUSER +-- test a function with insufficient privileges +create schema test_schema; +create role user_noexec with login; +grant usage on schema test_schema to user_noexec; +CREATE OR REPLACE FUNCTION test_schema.test_config_check_func_privileges(config jsonb) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'This message will only get printed if privileges suffice'; +END +$$ LANGUAGE PLPGSQL; +revoke execute on function test_schema.test_config_check_func_privileges from public; +-- verify the user doesn't have execute permissions on the function +select has_function_privilege('user_noexec', 'test_schema.test_config_check_func_privileges(jsonb)', 'execute'); + has_function_privilege +------------------------ + f +(1 row) + +\c :TEST_DBNAME user_noexec +-- user_noexec should not have exec permissions on this function +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_schema.test_config_check_func_privileges'::regproc); +ERROR: permission denied for function "test_config_check_func_privileges" +\c :TEST_DBNAME :ROLE_SUPERUSER +-- check that alter_job rejects a check function with invalid signature +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'renamed_func', +initial_start => :'time_zero'::timestamptz) as job_id_alter \gset +NOTICE: This message will get printed for both NULL and not NULL config +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter, check_config => 'test_config_check_func_0args'); +ERROR: function or procedure public.test_config_check_func_0args(config jsonb) not found +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter); +NOTICE: This message will get printed for both NULL and not NULL config + job_id | schedule_interval | config | check_config +--------+-------------------+--------+--------------------- + 1010 | @ 5 secs | {} | public.renamed_func +(1 row) + +-- test that we can unregister the check function +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter, check_config => 0); + job_id | schedule_interval | config | check_config +--------+-------------------+--------+-------------- + 1010 | @ 5 secs | {} | +(1 row) + +-- no message printed now +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter, config => '{}'); + job_id | schedule_interval | config | check_config +--------+-------------------+--------+-------------- + 1010 | @ 5 secs | {} | +(1 row) + +-- test the case where we have a background job that registers jobs with a check fn +CREATE OR REPLACE PROCEDURE add_scheduled_jobs_with_check(job_id int, config jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + perform add_job('test_proc_with_check', schedule_interval => '10 secs', config => '{}', check_config => 'renamed_func'); +END +$$; +select add_job('add_scheduled_jobs_with_check', schedule_interval => '1 hour') as last_job_id \gset +-- wait for enough time +SELECT wait_for_job_to_run(:last_job_id, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +select total_runs, total_successes, last_run_status from timescaledb_information.job_stats where job_id = :last_job_id; + total_runs | total_successes | last_run_status +------------+-----------------+----------------- + 1 | 1 | Success +(1 row) + +-- test coverage for alter_job +-- registering an invalid oid +select alter_job(:job_id_alter, check_config => 123456789::regproc); +ERROR: function with OID 123456789 does not exist +-- registering a function with 
insufficient privileges +\c :TEST_DBNAME user_noexec +select * from add_job('test_proc_with_check', '5 secs', config => '{}') as job_id_owner \gset +select * from alter_job(:job_id_owner, check_config => 'test_schema.test_config_check_func_privileges'::regproc); +ERROR: permission denied for function "test_config_check_func_privileges" +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP SCHEMA test_schema CASCADE; +NOTICE: drop cascades to function test_schema.test_config_check_func_privileges(jsonb) +-- Delete all jobs with that owner before we can drop the user. +DELETE FROM _timescaledb_config.bgw_job WHERE owner = 'user_noexec'::regrole; +DROP ROLE user_noexec; +-- test with aggregate check proc +create function jsonb_add (j1 jsonb, j2 jsonb) returns jsonb +AS $$ +BEGIN + RETURN j1 || j2; +END +$$ LANGUAGE PLPGSQL; +CREATE AGGREGATE sum_jsb (jsonb) +( + sfunc = jsonb_add, + stype = jsonb, + initcond = '{}' +); +-- for test coverage, check unsupported aggregate type +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'sum_jsb'::regproc); +ERROR: unsupported function type +-- Cleanup jobs +TRUNCATE _timescaledb_config.bgw_job CASCADE; +NOTICE: truncate cascades to table "bgw_job_stat" +NOTICE: truncate cascades to table "bgw_policy_chunk_stats" +-- github issue 4610 +CREATE TABLE sensor_data +( + time timestamptz not null, + sensor_id integer not null, + cpu double precision null, + temperature double precision null +); +SELECT FROM create_hypertable('sensor_data','time'); +-- +(1 row) + +SELECT '2022-10-06 00:00:00+00' as start_date_sd \gset +INSERT INTO sensor_data + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date_sd'::timestamptz - INTERVAL '1 months', :'start_date_sd'::timestamptz - INTERVAL '1 week', INTERVAL '1 minute') AS g1(time), + generate_series(1, 50, 1 ) AS g2(sensor_id) + ORDER BY + time; +-- enable compression +ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_orderby = 'time DESC'); +-- create new chunks +INSERT INTO sensor_data + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date_sd'::timestamptz - INTERVAL '2 months', :'start_date_sd'::timestamptz - INTERVAL '2 week', INTERVAL '2 minute') AS g1(time), + generate_series(1, 30, 1 ) AS g2(sensor_id) + ORDER BY + time; +-- get the name of a new uncompressed chunk +SELECT chunk_name AS new_uncompressed_chunk_name + FROM timescaledb_information.chunks + WHERE hypertable_name = 'sensor_data' AND NOT is_compressed LIMIT 1 \gset +-- change compression status so that this chunk is skipped when policy is run +update _timescaledb_catalog.chunk set status=3 where table_name = :'new_uncompressed_chunk_name'; +-- add new compression policy job +SELECT add_compression_policy('sensor_data', INTERVAL '1' minute) AS compressjob_id \gset +-- set recompress to true +SELECT alter_job(id,config:=jsonb_set(config,'{recompress}', 'true')) FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + alter_job +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1014,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""recompress"": true, ""hypertable_id"": 4, ""compress_after"": ""@ 1 min""}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +-- 
verify that there are other uncompressed new chunks that need to be compressed +SELECT count(*) > 1 + FROM timescaledb_information.chunks + WHERE hypertable_name = 'sensor_data' AND NOT is_compressed; + ?column? +---------- + t +(1 row) + +-- disable notice/warning as the new_uncompressed_chunk_name +-- is dynamic and it will be printed in those messages. +SET client_min_messages TO ERROR; +CALL run_job(:compressjob_id); +SET client_min_messages TO NOTICE; +-- check compression status is not changed for the chunk whose status was manually updated +SELECT status FROM _timescaledb_catalog.chunk where table_name = :'new_uncompressed_chunk_name'; + status +-------- + 3 +(1 row) + +-- confirm all the other new chunks are now compressed despite +-- facing an error when trying to compress :'new_uncompressed_chunk_name' +SELECT count(*) = 0 + FROM timescaledb_information.chunks + WHERE hypertable_name = 'sensor_data' AND NOT is_compressed; + ?column? +---------- + t +(1 row) + +-- cleanup +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +DROP TABLE sensor_data; +SELECT _timescaledb_functions.restart_background_workers(); + restart_background_workers +---------------------------- + t +(1 row) + +-- Github issue #5537 +-- Proc that waits until the given job enters the expected state +CREATE OR REPLACE PROCEDURE wait_for_job_status(job_param_id INTEGER, expected_status TEXT, spins INTEGER=:TEST_SPINWAIT_ITERS) +LANGUAGE PLPGSQL AS $$ +DECLARE + jobstatus TEXT; +BEGIN + FOR i in 1..spins + LOOP + SELECT job_status FROM timescaledb_information.job_stats WHERE job_id = job_param_id INTO jobstatus; + IF jobstatus = expected_status THEN + RETURN; + END IF; + PERFORM pg_sleep(0.1); + ROLLBACK; + END LOOP; + RAISE EXCEPTION 'wait_for_job_status(%): timeout after % tries', job_param_id, spins; +END; +$$; +-- Proc that sleeps for 1m - to keep the test jobs in running state +CREATE OR REPLACE PROCEDURE proc_that_sleeps(job_id INT, config JSONB) +LANGUAGE PLPGSQL AS +$$ +BEGIN + PERFORM pg_sleep(60); +END +$$; +-- create new jobs and ensure that the second one gets scheduled +-- before the first one by adjusting the initial_start values +SELECT add_job('proc_that_sleeps', '1h', initial_start => now()::timestamptz + interval '2s') AS job_id_1 \gset +SELECT add_job('proc_that_sleeps', '1h', initial_start => now()::timestamptz - interval '2s') AS job_id_2 \gset +-- wait for the jobs to start running job_2 will start running first +CALL wait_for_job_status(:job_id_2, 'Running'); +CALL wait_for_job_status(:job_id_1, 'Running'); +-- add a new job and wait for it to start +SELECT add_job('proc_that_sleeps', '1h') AS job_id_3 \gset +CALL wait_for_job_status(:job_id_3, 'Running'); +-- verify that none of the jobs crashed +SELECT job_id, job_status, next_start, + total_runs, total_successes, total_failures + FROM timescaledb_information.job_stats + WHERE job_id IN (:job_id_1, :job_id_2, :job_id_3) + ORDER BY job_id; + job_id | job_status | next_start | total_runs | total_successes | total_failures +--------+------------+------------+------------+-----------------+---------------- + 1015 | Running | -infinity | 1 | 0 | 0 + 1016 | Running | -infinity | 1 | 0 | 0 + 1017 | Running | -infinity | 1 | 0 | 0 +(3 rows) + +SELECT job_id, err_message + FROM timescaledb_information.job_errors + WHERE job_id IN (:job_id_1, :job_id_2, :job_id_3); + job_id | err_message +--------+------------- +(0 rows) + +-- cleanup +SELECT 
_timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +CALL wait_for_job_status(:job_id_1, 'Scheduled'); +CALL wait_for_job_status(:job_id_2, 'Scheduled'); +CALL wait_for_job_status(:job_id_3, 'Scheduled'); +SELECT delete_job(:job_id_1); + delete_job +------------ + +(1 row) + +SELECT delete_job(:job_id_2); + delete_job +------------ + +(1 row) + +SELECT delete_job(:job_id_3); + delete_job +------------ + +(1 row) + diff --git a/tsl/test/expected/bgw_custom-15.out b/tsl/test/expected/bgw_custom-15.out new file mode 100644 index 00000000000..6f308e7d361 --- /dev/null +++ b/tsl/test/expected/bgw_custom-15.out @@ -0,0 +1,1072 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +CREATE TABLE custom_log(job_id int, args jsonb, extra text, runner NAME DEFAULT CURRENT_ROLE); +CREATE OR REPLACE FUNCTION custom_func(jobid int, args jsonb) RETURNS VOID LANGUAGE SQL AS +$$ + INSERT INTO custom_log VALUES($1, $2, 'custom_func'); +$$; +CREATE OR REPLACE FUNCTION custom_func_definer(jobid int, args jsonb) RETURNS VOID LANGUAGE SQL AS +$$ + INSERT INTO custom_log VALUES($1, $2, 'security definer'); +$$ SECURITY DEFINER; +CREATE OR REPLACE PROCEDURE custom_proc(job_id int, args jsonb) LANGUAGE SQL AS +$$ + INSERT INTO custom_log VALUES($1, $2, 'custom_proc'); +$$; +-- procedure with transaction handling +CREATE OR REPLACE PROCEDURE custom_proc2(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + INSERT INTO custom_log VALUES($1, $2, 'custom_proc2 1 COMMIT ' || (args->>'type')); + COMMIT; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc2 2 ROLLBACK ' || (args->>'type')); + ROLLBACK; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc2 3 COMMIT ' || (args->>'type')); + COMMIT; +END +$$; +\set ON_ERROR_STOP 0 +-- test bad input +SELECT add_job(NULL, '1h'); +ERROR: function or procedure cannot be NULL +SELECT add_job(0, '1h'); +ERROR: function or procedure with OID 0 does not exist +-- this will return an error about Oid 4294967295 +-- while regproc is unsigned int postgres has an implicit cast from int to regproc +SELECT add_job(-1, '1h'); +ERROR: function or procedure with OID 4294967295 does not exist +SELECT add_job('invalid_func', '1h'); +ERROR: function "invalid_func" does not exist at character 16 +SELECT add_job('custom_func', NULL); +ERROR: schedule interval cannot be NULL +SELECT add_job('custom_func', 'invalid interval'); +ERROR: invalid input syntax for type interval: "invalid interval" at character 31 +\set ON_ERROR_STOP 1 +select '2000-01-01 00:00:00+00' as time_zero \gset +SELECT add_job('custom_func','1h', config:='{"type":"function"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1000 +(1 row) + +SELECT add_job('custom_proc','1h', config:='{"type":"procedure"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1001 +(1 row) + +SELECT add_job('custom_proc2','1h', config:= '{"type":"procedure"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1002 +(1 row) + +SELECT add_job('custom_func', '1h', config:='{"type":"function"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1003 +(1 row) + +SELECT add_job('custom_func_definer', '1h', config:='{"type":"function"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1004 +(1 row) + +-- exclude the 
telemetry[1] and job error retention[2] jobs +-- job 2 may have already run which will set its next_start field thus making the test flaky +SELECT * FROM timescaledb_information.jobs WHERE job_id NOT IN (1,2) ORDER BY 1; + job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name +--------+----------------------------+-------------------+-------------+-------------+--------------+-------------+---------------------+-------------------+-----------+----------------+-----------------------+------------------------------+------------------------------+-------------------+-----------------+--------------+------------ + 1000 | User-Defined Action [1000] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_func | default_perm_user | t | t | {"type": "function"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1001 | User-Defined Action [1001] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_proc | default_perm_user | t | t | {"type": "procedure"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1002 | User-Defined Action [1002] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_proc2 | default_perm_user | t | t | {"type": "procedure"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1003 | User-Defined Action [1003] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_func | default_perm_user | t | t | {"type": "function"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1004 | User-Defined Action [1004] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_func_definer | default_perm_user | t | t | {"type": "function"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | +(5 rows) + +SELECT count(*) FROM _timescaledb_config.bgw_job WHERE config->>'type' IN ('procedure', 'function'); + count +------- + 5 +(1 row) + +\set ON_ERROR_STOP 0 +-- test bad input +CALL run_job(NULL); +ERROR: job ID cannot be NULL +CALL run_job(-1); +ERROR: job -1 not found +\set ON_ERROR_STOP 1 +CALL run_job(1000); +CALL run_job(1001); +CALL run_job(1002); +CALL run_job(1003); +CALL run_job(1004); +SELECT * FROM custom_log ORDER BY job_id, extra; + job_id | args | extra | runner +--------+-----------------------+---------------------------------+------------------- + 1000 | {"type": "function"} | custom_func | default_perm_user + 1001 | {"type": "procedure"} | custom_proc | default_perm_user + 1002 | {"type": "procedure"} | custom_proc2 1 COMMIT procedure | default_perm_user + 1002 | {"type": "procedure"} | custom_proc2 3 COMMIT procedure | default_perm_user + 1003 | {"type": "function"} | custom_func | default_perm_user + 1004 | {"type": "function"} | security definer | default_perm_user +(6 rows) + +\set ON_ERROR_STOP 0 +-- test bad input +SELECT delete_job(NULL); + delete_job +------------ + +(1 row) + +SELECT delete_job(-1); +ERROR: job -1 not found +\set ON_ERROR_STOP 1 +-- We keep job 1000 for some additional checks. 
+SELECT delete_job(1001); + delete_job +------------ + +(1 row) + +SELECT delete_job(1002); + delete_job +------------ + +(1 row) + +SELECT delete_job(1003); + delete_job +------------ + +(1 row) + +SELECT delete_job(1004); + delete_job +------------ + +(1 row) + +-- check jobs got removed +SELECT count(*) FROM timescaledb_information.jobs WHERE job_id >= 1001; + count +------- + 0 +(1 row) + +\c :TEST_DBNAME :ROLE_SUPERUSER +\set ON_ERROR_STOP 0 +-- test bad input +SELECT alter_job(NULL, if_exists => false); +ERROR: job ID cannot be NULL +SELECT alter_job(-1, if_exists => false); +ERROR: job -1 not found +\set ON_ERROR_STOP 1 +-- test bad input but don't fail +SELECT alter_job(NULL, if_exists => true); +NOTICE: job 0 not found, skipping + alter_job +----------- + +(1 row) + +SELECT alter_job(-1, if_exists => true); +NOTICE: job -1 not found, skipping + alter_job +----------- + +(1 row) + +-- test altering job with NULL config +SELECT job_id FROM alter_job(1000,scheduled:=false); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+---------------------- + f | {"type": "function"} +(1 row) + +-- test updating job settings +SELECT job_id FROM alter_job(1000,config:='{"test":"test"}'); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+------------------ + f | {"test": "test"} +(1 row) + +SELECT job_id FROM alter_job(1000,scheduled:=true); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+------------------ + t | {"test": "test"} +(1 row) + +SELECT job_id FROM alter_job(1000,scheduled:=false); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+------------------ + f | {"test": "test"} +(1 row) + +-- Done with job 1000 now, so remove it. 
+SELECT delete_job(1000); + delete_job +------------ + +(1 row) + +--test for #2793 +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +-- background workers are disabled, so the job will not run -- +SELECT add_job( proc=>'custom_func', + schedule_interval=>'1h', initial_start =>'2018-01-01 10:00:00-05') AS job_id_1 \gset +SELECT job_id, next_start, scheduled, schedule_interval +FROM timescaledb_information.jobs WHERE job_id > 1000; + job_id | next_start | scheduled | schedule_interval +--------+------------------------------+-----------+------------------- + 1005 | Mon Jan 01 07:00:00 2018 PST | t | @ 1 hour +(1 row) + +\x +SELECT * FROM timescaledb_information.job_stats WHERE job_id > 1000; +-[ RECORD 1 ]----------+----------------------------- +hypertable_schema | +hypertable_name | +job_id | 1005 +last_run_started_at | -infinity +last_successful_finish | -infinity +last_run_status | +job_status | Scheduled +last_run_duration | +next_start | Mon Jan 01 07:00:00 2018 PST +total_runs | 0 +total_successes | 0 +total_failures | 0 + +\x +SELECT delete_job(:job_id_1); + delete_job +------------ + +(1 row) + +-- tests for #3545 +CREATE FUNCTION wait_for_job_to_run(job_param_id INTEGER, expected_runs INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + r RECORD; +BEGIN + FOR i in 1..spins + LOOP + SELECT total_successes, total_failures FROM _timescaledb_internal.bgw_job_stat WHERE job_id=job_param_id INTO r; + IF (r.total_failures > 0) THEN + RAISE INFO 'wait_for_job_to_run: job execution failed'; + RETURN false; + ELSEIF (r.total_successes = expected_runs) THEN + RETURN true; + ELSEIF (r.total_successes > expected_runs) THEN + RAISE 'num_runs > expected'; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RAISE INFO 'wait_for_job_to_run: timeout after % tries', spins; + RETURN false; +END +$BODY$; +TRUNCATE custom_log; +-- Nested procedure call +CREATE OR REPLACE PROCEDURE custom_proc_nested(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + INSERT INTO custom_log VALUES($1, $2, 'custom_proc_nested 1 COMMIT'); + COMMIT; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc_nested 2 ROLLBACK'); + ROLLBACK; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc_nested 3 COMMIT'); + COMMIT; +END +$$; +CREATE OR REPLACE PROCEDURE custom_proc3(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + CALL custom_proc_nested(job_id, args); +END +$$; +CREATE OR REPLACE PROCEDURE custom_proc4(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + INSERT INTO custom_log VALUES($1, $2, 'custom_proc4 1 COMMIT'); + COMMIT; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc4 2 ROLLBACK'); + ROLLBACK; + RAISE EXCEPTION 'forced exception'; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc4 3 ABORT'); + COMMIT; +END +$$; +CREATE OR REPLACE PROCEDURE custom_proc5(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + CALL refresh_continuous_aggregate('conditions_summary_daily', '2021-08-01 00:00', '2021-08-31 00:00'); +END +$$; +-- Remove any default jobs, e.g., telemetry +\c :TEST_DBNAME :ROLE_SUPERUSER +TRUNCATE _timescaledb_config.bgw_job RESTART IDENTITY CASCADE; +NOTICE: truncate cascades to table "bgw_job_stat" +NOTICE: truncate cascades to table "bgw_policy_chunk_stats" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT add_job('custom_proc2', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_1 \gset +SELECT add_job('custom_proc3', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_2 \gset +\c 
:TEST_DBNAME :ROLE_SUPERUSER +-- Start Background Workers +SELECT _timescaledb_functions.start_background_workers(); + start_background_workers +-------------------------- + t +(1 row) + +-- Wait for jobs +SELECT wait_for_job_to_run(:job_id_1, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT wait_for_job_to_run(:job_id_2, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +-- Check results +SELECT * FROM custom_log ORDER BY job_id, extra; + job_id | args | extra | runner +--------+-----------------------+---------------------------------+------------------- + 1000 | {"type": "procedure"} | custom_proc2 1 COMMIT procedure | default_perm_user + 1000 | {"type": "procedure"} | custom_proc2 3 COMMIT procedure | default_perm_user + 1001 | {"type": "procedure"} | custom_proc_nested 1 COMMIT | default_perm_user + 1001 | {"type": "procedure"} | custom_proc_nested 3 COMMIT | default_perm_user +(4 rows) + +-- Delete previous jobs +SELECT delete_job(:job_id_1); + delete_job +------------ + +(1 row) + +SELECT delete_job(:job_id_2); + delete_job +------------ + +(1 row) + +TRUNCATE custom_log; +-- Forced Exception +SELECT add_job('custom_proc4', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_3 \gset +SELECT wait_for_job_to_run(:job_id_3, 1); +INFO: wait_for_job_to_run: job execution failed + wait_for_job_to_run +--------------------- + f +(1 row) + +-- Check results +SELECT * FROM custom_log ORDER BY job_id, extra; + job_id | args | extra | runner +--------+-----------------------+-----------------------+------------ + 1002 | {"type": "procedure"} | custom_proc4 1 COMMIT | super_user +(1 row) + +-- Delete previous jobs +SELECT delete_job(:job_id_3); + delete_job +------------ + +(1 row) + +CREATE TABLE conditions ( + time TIMESTAMP NOT NULL, + location TEXT NOT NULL, + location2 char(10) NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL +) WITH (autovacuum_enabled = FALSE); +SELECT create_hypertable('conditions', 'time', chunk_time_interval := '15 days'::interval); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_hypertable +------------------------- + (1,public,conditions,t) +(1 row) + +ALTER TABLE conditions + SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'location', + timescaledb.compress_orderby = 'time' +); +INSERT INTO conditions +SELECT generate_series('2021-08-01 00:00'::timestamp, '2021-08-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +-- Chunk compress stats +SELECT * FROM _timescaledb_internal.compressed_chunk_stats ORDER BY chunk_name; + hypertable_schema | hypertable_name | chunk_schema | chunk_name | compression_status | uncompressed_heap_size | uncompressed_index_size | uncompressed_toast_size | uncompressed_total_size | compressed_heap_size | compressed_index_size | compressed_toast_size | compressed_total_size +-------------------+-----------------+-----------------------+------------------+--------------------+------------------------+-------------------------+-------------------------+-------------------------+----------------------+-----------------------+-----------------------+----------------------- + public | conditions | _timescaledb_internal | _hyper_1_1_chunk | Uncompressed | | | | | | | | + public | conditions | _timescaledb_internal | _hyper_1_2_chunk | Uncompressed | | | | | | | | + public | conditions | _timescaledb_internal | _hyper_1_3_chunk | Uncompressed | | | | | | | | +(3 rows) + 
+-- Compression policy +SELECT add_compression_policy('conditions', interval '1 day') AS job_id_4 \gset +SELECT wait_for_job_to_run(:job_id_4, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +-- Chunk compress stats +SELECT * FROM _timescaledb_internal.compressed_chunk_stats ORDER BY chunk_name; + hypertable_schema | hypertable_name | chunk_schema | chunk_name | compression_status | uncompressed_heap_size | uncompressed_index_size | uncompressed_toast_size | uncompressed_total_size | compressed_heap_size | compressed_index_size | compressed_toast_size | compressed_total_size +-------------------+-----------------+-----------------------+------------------+--------------------+------------------------+-------------------------+-------------------------+-------------------------+----------------------+-----------------------+-----------------------+----------------------- + public | conditions | _timescaledb_internal | _hyper_1_1_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 16384 | 16384 | 8192 | 40960 + public | conditions | _timescaledb_internal | _hyper_1_2_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 16384 | 16384 | 8192 | 40960 + public | conditions | _timescaledb_internal | _hyper_1_3_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 16384 | 16384 | 8192 | 40960 +(3 rows) + +--TEST compression job after inserting data into previously compressed chunk +INSERT INTO conditions +SELECT generate_series('2021-08-01 00:00'::timestamp, '2021-08-31 00:00'::timestamp, '1 day'), 'NYC', 'nycity', 40, 40; +SELECT id, table_name, status from _timescaledb_catalog.chunk +where hypertable_id = (select id from _timescaledb_catalog.hypertable + where table_name = 'conditions') +order by id; + id | table_name | status +----+------------------+-------- + 1 | _hyper_1_1_chunk | 9 + 2 | _hyper_1_2_chunk | 9 + 3 | _hyper_1_3_chunk | 9 +(3 rows) + +--running job second time, wait for it to complete +select t.schedule_interval FROM alter_job(:job_id_4, next_start=> now() ) t; + schedule_interval +------------------- + @ 12 hours +(1 row) + +SELECT wait_for_job_to_run(:job_id_4, 2); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT id, table_name, status from _timescaledb_catalog.chunk +where hypertable_id = (select id from _timescaledb_catalog.hypertable + where table_name = 'conditions') +order by id; + id | table_name | status +----+------------------+-------- + 1 | _hyper_1_1_chunk | 1 + 2 | _hyper_1_2_chunk | 1 + 3 | _hyper_1_3_chunk | 1 +(3 rows) + +-- Drop the compression job +SELECT delete_job(:job_id_4); + delete_job +------------ + +(1 row) + +-- Decompress chunks before create the cagg +SELECT decompress_chunk(c) FROM show_chunks('conditions') c; + decompress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk +(3 rows) + +-- TEST Continuous Aggregate job +CREATE MATERIALIZED VIEW conditions_summary_daily +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature), + MAX(temperature), + MIN(temperature) +FROM conditions +GROUP BY location, bucket +WITH NO DATA; +-- Refresh Continous Aggregate by Job +SELECT add_job('custom_proc5', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_5 \gset +SELECT wait_for_job_to_run(:job_id_5, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT count(*) FROM 
conditions_summary_daily; + count +------- + 62 +(1 row) + +-- TESTs for alter_job_set_hypertable_id API +SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id_5, NULL); + alter_job_set_hypertable_id +----------------------------- + 1004 +(1 row) + +SELECT id, proc_name, hypertable_id +FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; + id | proc_name | hypertable_id +------+--------------+--------------- + 1004 | custom_proc5 | +(1 row) + +-- error case, try to associate with a PG relation +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id_5, 'custom_log'); +ERROR: relation "custom_log" is not a hypertable or continuous aggregate +\set ON_ERROR_STOP 1 +-- TEST associate the cagg with the job +SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id_5, 'conditions_summary_daily'::regclass); + alter_job_set_hypertable_id +----------------------------- + 1004 +(1 row) + +SELECT id, proc_name, hypertable_id +FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; + id | proc_name | hypertable_id +------+--------------+--------------- + 1004 | custom_proc5 | 3 +(1 row) + +--verify that job is dropped when cagg is dropped +DROP MATERIALIZED VIEW conditions_summary_daily; +NOTICE: drop cascades to table _timescaledb_internal._hyper_3_10_chunk +SELECT id, proc_name, hypertable_id +FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; + id | proc_name | hypertable_id +----+-----------+--------------- +(0 rows) + +-- Cleanup +DROP TABLE conditions; +DROP TABLE custom_log; +-- Stop Background Workers +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +SELECT _timescaledb_functions.restart_background_workers(); + restart_background_workers +---------------------------- + t +(1 row) + +\set ON_ERROR_STOP 0 +-- add test for custom jobs with custom check functions +-- create the functions/procedures to be used as checking functions +CREATE OR REPLACE PROCEDURE test_config_check_proc(config jsonb) +LANGUAGE PLPGSQL +AS $$ +DECLARE + drop_after interval; +BEGIN + SELECT jsonb_object_field_text (config, 'drop_after')::interval INTO STRICT drop_after; + IF drop_after IS NULL THEN + RAISE EXCEPTION 'Config must be not NULL and have drop_after'; + END IF ; +END +$$; +CREATE OR REPLACE FUNCTION test_config_check_func(config jsonb) RETURNS VOID +AS $$ +DECLARE + drop_after interval; +BEGIN + IF config IS NULL THEN + RETURN; + END IF; + SELECT jsonb_object_field_text (config, 'drop_after')::interval INTO STRICT drop_after; + IF drop_after IS NULL THEN + RAISE EXCEPTION 'Config can be NULL but must have drop_after if not'; + END IF ; +END +$$ LANGUAGE PLPGSQL; +-- step 2, create a procedure to run as a custom job +CREATE OR REPLACE PROCEDURE test_proc_with_check(job_id int, config jsonb) +LANGUAGE PLPGSQL +AS $$ +BEGIN + RAISE NOTICE 'Will only print this if config passes checks, my config is %', config; +END +$$; +-- step 3, add the job with the config check function passed as argument +-- test procedures, should get an unsupported error +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_proc'::regproc); +ERROR: unsupported function type +-- test functions +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func'::regproc); +ERROR: Config can be NULL but must have drop_after if not +select add_job('test_proc_with_check', '5 secs', config => NULL, check_config => 
'test_config_check_func'::regproc); + add_job +--------- + 1005 +(1 row) + +select add_job('test_proc_with_check', '5 secs', config => '{"drop_after": "chicken"}', check_config => 'test_config_check_func'::regproc); +ERROR: invalid input syntax for type interval: "chicken" +select add_job('test_proc_with_check', '5 secs', config => '{"drop_after": "2 weeks"}', check_config => 'test_config_check_func'::regproc) +as job_with_func_check_id \gset +--- test alter_job +select alter_job(:job_with_func_check_id, config => '{"drop_after":"chicken"}'); +ERROR: invalid input syntax for type interval: "chicken" +select config from alter_job(:job_with_func_check_id, config => '{"drop_after":"5 years"}'); + config +--------------------------- + {"drop_after": "5 years"} +(1 row) + +-- test that jobs with an incorrect check function signature will not be registered +-- these are all incorrect function signatures +CREATE OR REPLACE FUNCTION test_config_check_func_0args() RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'I take no arguments and will validate anything you give me!'; +END +$$ LANGUAGE PLPGSQL; +CREATE OR REPLACE FUNCTION test_config_check_func_2args(config jsonb, intarg int) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'I take two arguments (jsonb, int) and I should fail to run!'; +END +$$ LANGUAGE PLPGSQL; +CREATE OR REPLACE FUNCTION test_config_check_func_intarg(config int) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'I take one argument which is an integer and I should fail to run!'; +END +$$ LANGUAGE PLPGSQL; +-- -- this should fail, it has an incorrect check function +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_0args'::regproc); +ERROR: function or procedure public.test_config_check_func_0args(config jsonb) not found +-- -- so should this +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_2args'::regproc); +ERROR: function or procedure public.test_config_check_func_2args(config jsonb) not found +-- and this +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_intarg'::regproc); +ERROR: function or procedure public.test_config_check_func_intarg(config jsonb) not found +-- and this fails as it calls a nonexistent function +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_nonexistent_check_func'::regproc); +ERROR: function "test_nonexistent_check_func" does not exist at character 82 +-- when called with a valid check function and a NULL config no check should occur +CREATE OR REPLACE FUNCTION test_config_check_func(config jsonb) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'This message will get printed for both NULL and not NULL config'; +END +$$ LANGUAGE PLPGSQL; +SET client_min_messages = NOTICE; +-- check done for both NULL and non-NULL config +select add_job('test_proc_with_check', '5 secs', config => NULL, check_config => 'test_config_check_func'::regproc); +NOTICE: This message will get printed for both NULL and not NULL config + add_job +--------- + 1007 +(1 row) + +-- check done +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func'::regproc) as job_id \gset +NOTICE: This message will get printed for both NULL and not NULL config +-- check function not returning void +CREATE OR REPLACE FUNCTION test_config_check_func_returns_int(config jsonb) RETURNS INT +AS $$ +BEGIN + raise notice 'I print a message, and then I return least(1,2)'; + RETURN 
LEAST(1, 2); +END +$$ LANGUAGE PLPGSQL; +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_returns_int'::regproc, +initial_start => :'time_zero'::timestamptz) as job_id_int \gset +NOTICE: I print a message, and then I return least(1,2) +-- drop the registered check function, verify that alter_job will work and print a warning that +-- the check is being skipped due to the check function missing +ALTER FUNCTION test_config_check_func RENAME TO renamed_func; +select job_id, schedule_interval, config, check_config from alter_job(:job_id, schedule_interval => '1 hour'); +WARNING: function public.test_config_check_func(config jsonb) not found, skipping config validation for job 1008 + job_id | schedule_interval | config | check_config +--------+-------------------+--------+------------------------------- + 1008 | @ 1 hour | {} | public.test_config_check_func +(1 row) + +DROP FUNCTION test_config_check_func_returns_int; +select job_id, schedule_interval, config, check_config from alter_job(:job_id_int, config => '{"field":"value"}'); +WARNING: function public.test_config_check_func_returns_int(config jsonb) not found, skipping config validation for job 1009 + job_id | schedule_interval | config | check_config +--------+-------------------+--------------------+------------------------------------------- + 1009 | @ 5 secs | {"field": "value"} | public.test_config_check_func_returns_int +(1 row) + +-- rename the check function and then call alter_job to register the new name +select job_id, schedule_interval, config, check_config from alter_job(:job_id, check_config => 'renamed_func'::regproc); +NOTICE: This message will get printed for both NULL and not NULL config + job_id | schedule_interval | config | check_config +--------+-------------------+--------+--------------------- + 1008 | @ 1 hour | {} | public.renamed_func +(1 row) + +-- run alter again, should get a config check +select job_id, schedule_interval, config, check_config from alter_job(:job_id, config => '{}'); +NOTICE: This message will get printed for both NULL and not NULL config + job_id | schedule_interval | config | check_config +--------+-------------------+--------+--------------------- + 1008 | @ 1 hour | {} | public.renamed_func +(1 row) + +-- do not drop the current check function but register a new one +CREATE OR REPLACE FUNCTION substitute_check_func(config jsonb) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'This message is a substitute of the previously printed one'; +END +$$ LANGUAGE PLPGSQL; +-- register the new check +select job_id, schedule_interval, config, check_config from alter_job(:job_id, check_config => 'substitute_check_func'); +NOTICE: This message is a substitute of the previously printed one + job_id | schedule_interval | config | check_config +--------+-------------------+--------+------------------------------ + 1008 | @ 1 hour | {} | public.substitute_check_func +(1 row) + +select job_id, schedule_interval, config, check_config from alter_job(:job_id, config => '{}'); +NOTICE: This message is a substitute of the previously printed one + job_id | schedule_interval | config | check_config +--------+-------------------+--------+------------------------------ + 1008 | @ 1 hour | {} | public.substitute_check_func +(1 row) + +RESET client_min_messages; +-- test an oid that doesn't exist +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 17424217::regproc); +ERROR: function with OID 17424217 does not exist +\c :TEST_DBNAME 
:ROLE_SUPERUSER +-- test a function with insufficient privileges +create schema test_schema; +create role user_noexec with login; +grant usage on schema test_schema to user_noexec; +CREATE OR REPLACE FUNCTION test_schema.test_config_check_func_privileges(config jsonb) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'This message will only get printed if privileges suffice'; +END +$$ LANGUAGE PLPGSQL; +revoke execute on function test_schema.test_config_check_func_privileges from public; +-- verify the user doesn't have execute permissions on the function +select has_function_privilege('user_noexec', 'test_schema.test_config_check_func_privileges(jsonb)', 'execute'); + has_function_privilege +------------------------ + f +(1 row) + +\c :TEST_DBNAME user_noexec +-- user_noexec should not have exec permissions on this function +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_schema.test_config_check_func_privileges'::regproc); +ERROR: permission denied for function "test_config_check_func_privileges" +\c :TEST_DBNAME :ROLE_SUPERUSER +-- check that alter_job rejects a check function with invalid signature +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'renamed_func', +initial_start => :'time_zero'::timestamptz) as job_id_alter \gset +NOTICE: This message will get printed for both NULL and not NULL config +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter, check_config => 'test_config_check_func_0args'); +ERROR: function or procedure public.test_config_check_func_0args(config jsonb) not found +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter); +NOTICE: This message will get printed for both NULL and not NULL config + job_id | schedule_interval | config | check_config +--------+-------------------+--------+--------------------- + 1010 | @ 5 secs | {} | public.renamed_func +(1 row) + +-- test that we can unregister the check function +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter, check_config => 0); + job_id | schedule_interval | config | check_config +--------+-------------------+--------+-------------- + 1010 | @ 5 secs | {} | +(1 row) + +-- no message printed now +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter, config => '{}'); + job_id | schedule_interval | config | check_config +--------+-------------------+--------+-------------- + 1010 | @ 5 secs | {} | +(1 row) + +-- test the case where we have a background job that registers jobs with a check fn +CREATE OR REPLACE PROCEDURE add_scheduled_jobs_with_check(job_id int, config jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + perform add_job('test_proc_with_check', schedule_interval => '10 secs', config => '{}', check_config => 'renamed_func'); +END +$$; +select add_job('add_scheduled_jobs_with_check', schedule_interval => '1 hour') as last_job_id \gset +-- wait for enough time +SELECT wait_for_job_to_run(:last_job_id, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +select total_runs, total_successes, last_run_status from timescaledb_information.job_stats where job_id = :last_job_id; + total_runs | total_successes | last_run_status +------------+-----------------+----------------- + 1 | 1 | Success +(1 row) + +-- test coverage for alter_job +-- registering an invalid oid +select alter_job(:job_id_alter, check_config => 123456789::regproc); +ERROR: function with OID 123456789 does not exist +-- registering a function with 
insufficient privileges +\c :TEST_DBNAME user_noexec +select * from add_job('test_proc_with_check', '5 secs', config => '{}') as job_id_owner \gset +select * from alter_job(:job_id_owner, check_config => 'test_schema.test_config_check_func_privileges'::regproc); +ERROR: permission denied for function "test_config_check_func_privileges" +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP SCHEMA test_schema CASCADE; +NOTICE: drop cascades to function test_schema.test_config_check_func_privileges(jsonb) +-- Delete all jobs with that owner before we can drop the user. +DELETE FROM _timescaledb_config.bgw_job WHERE owner = 'user_noexec'::regrole; +DROP ROLE user_noexec; +-- test with aggregate check proc +create function jsonb_add (j1 jsonb, j2 jsonb) returns jsonb +AS $$ +BEGIN + RETURN j1 || j2; +END +$$ LANGUAGE PLPGSQL; +CREATE AGGREGATE sum_jsb (jsonb) +( + sfunc = jsonb_add, + stype = jsonb, + initcond = '{}' +); +-- for test coverage, check unsupported aggregate type +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'sum_jsb'::regproc); +ERROR: unsupported function type +-- Cleanup jobs +TRUNCATE _timescaledb_config.bgw_job CASCADE; +NOTICE: truncate cascades to table "bgw_job_stat" +NOTICE: truncate cascades to table "bgw_policy_chunk_stats" +-- github issue 4610 +CREATE TABLE sensor_data +( + time timestamptz not null, + sensor_id integer not null, + cpu double precision null, + temperature double precision null +); +SELECT FROM create_hypertable('sensor_data','time'); +-- +(1 row) + +SELECT '2022-10-06 00:00:00+00' as start_date_sd \gset +INSERT INTO sensor_data + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date_sd'::timestamptz - INTERVAL '1 months', :'start_date_sd'::timestamptz - INTERVAL '1 week', INTERVAL '1 minute') AS g1(time), + generate_series(1, 50, 1 ) AS g2(sensor_id) + ORDER BY + time; +-- enable compression +ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_orderby = 'time DESC'); +-- create new chunks +INSERT INTO sensor_data + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date_sd'::timestamptz - INTERVAL '2 months', :'start_date_sd'::timestamptz - INTERVAL '2 week', INTERVAL '2 minute') AS g1(time), + generate_series(1, 30, 1 ) AS g2(sensor_id) + ORDER BY + time; +-- get the name of a new uncompressed chunk +SELECT chunk_name AS new_uncompressed_chunk_name + FROM timescaledb_information.chunks + WHERE hypertable_name = 'sensor_data' AND NOT is_compressed LIMIT 1 \gset +-- change compression status so that this chunk is skipped when policy is run +update _timescaledb_catalog.chunk set status=3 where table_name = :'new_uncompressed_chunk_name'; +-- add new compression policy job +SELECT add_compression_policy('sensor_data', INTERVAL '1' minute) AS compressjob_id \gset +-- set recompress to true +SELECT alter_job(id,config:=jsonb_set(config,'{recompress}', 'true')) FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + alter_job +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1014,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""recompress"": true, ""hypertable_id"": 4, ""compress_after"": ""@ 1 min""}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +-- 
verify that there are other uncompressed new chunks that need to be compressed +SELECT count(*) > 1 + FROM timescaledb_information.chunks + WHERE hypertable_name = 'sensor_data' AND NOT is_compressed; + ?column? +---------- + t +(1 row) + +-- disable notice/warning as the new_uncompressed_chunk_name +-- is dynamic and it will be printed in those messages. +SET client_min_messages TO ERROR; +CALL run_job(:compressjob_id); +SET client_min_messages TO NOTICE; +-- check compression status is not changed for the chunk whose status was manually updated +SELECT status FROM _timescaledb_catalog.chunk where table_name = :'new_uncompressed_chunk_name'; + status +-------- + 3 +(1 row) + +-- confirm all the other new chunks are now compressed despite +-- facing an error when trying to compress :'new_uncompressed_chunk_name' +SELECT count(*) = 0 + FROM timescaledb_information.chunks + WHERE hypertable_name = 'sensor_data' AND NOT is_compressed; + ?column? +---------- + t +(1 row) + +-- cleanup +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +DROP TABLE sensor_data; +SELECT _timescaledb_functions.restart_background_workers(); + restart_background_workers +---------------------------- + t +(1 row) + +-- Github issue #5537 +-- Proc that waits until the given job enters the expected state +CREATE OR REPLACE PROCEDURE wait_for_job_status(job_param_id INTEGER, expected_status TEXT, spins INTEGER=:TEST_SPINWAIT_ITERS) +LANGUAGE PLPGSQL AS $$ +DECLARE + jobstatus TEXT; +BEGIN + FOR i in 1..spins + LOOP + SELECT job_status FROM timescaledb_information.job_stats WHERE job_id = job_param_id INTO jobstatus; + IF jobstatus = expected_status THEN + RETURN; + END IF; + PERFORM pg_sleep(0.1); + ROLLBACK; + END LOOP; + RAISE EXCEPTION 'wait_for_job_status(%): timeout after % tries', job_param_id, spins; +END; +$$; +-- Proc that sleeps for 1m - to keep the test jobs in running state +CREATE OR REPLACE PROCEDURE proc_that_sleeps(job_id INT, config JSONB) +LANGUAGE PLPGSQL AS +$$ +BEGIN + PERFORM pg_sleep(60); +END +$$; +-- create new jobs and ensure that the second one gets scheduled +-- before the first one by adjusting the initial_start values +SELECT add_job('proc_that_sleeps', '1h', initial_start => now()::timestamptz + interval '2s') AS job_id_1 \gset +SELECT add_job('proc_that_sleeps', '1h', initial_start => now()::timestamptz - interval '2s') AS job_id_2 \gset +-- wait for the jobs to start running job_2 will start running first +CALL wait_for_job_status(:job_id_2, 'Running'); +CALL wait_for_job_status(:job_id_1, 'Running'); +-- add a new job and wait for it to start +SELECT add_job('proc_that_sleeps', '1h') AS job_id_3 \gset +CALL wait_for_job_status(:job_id_3, 'Running'); +-- verify that none of the jobs crashed +SELECT job_id, job_status, next_start, + total_runs, total_successes, total_failures + FROM timescaledb_information.job_stats + WHERE job_id IN (:job_id_1, :job_id_2, :job_id_3) + ORDER BY job_id; + job_id | job_status | next_start | total_runs | total_successes | total_failures +--------+------------+------------+------------+-----------------+---------------- + 1015 | Running | -infinity | 1 | 0 | 0 + 1016 | Running | -infinity | 1 | 0 | 0 + 1017 | Running | -infinity | 1 | 0 | 0 +(3 rows) + +SELECT job_id, err_message + FROM timescaledb_information.job_errors + WHERE job_id IN (:job_id_1, :job_id_2, :job_id_3); + job_id | err_message +--------+------------- +(0 rows) + +-- cleanup +SELECT 
_timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +CALL wait_for_job_status(:job_id_1, 'Scheduled'); +CALL wait_for_job_status(:job_id_2, 'Scheduled'); +CALL wait_for_job_status(:job_id_3, 'Scheduled'); +SELECT delete_job(:job_id_1); + delete_job +------------ + +(1 row) + +SELECT delete_job(:job_id_2); + delete_job +------------ + +(1 row) + +SELECT delete_job(:job_id_3); + delete_job +------------ + +(1 row) + diff --git a/tsl/test/expected/bgw_custom-16.out b/tsl/test/expected/bgw_custom-16.out new file mode 100644 index 00000000000..6f308e7d361 --- /dev/null +++ b/tsl/test/expected/bgw_custom-16.out @@ -0,0 +1,1072 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +CREATE TABLE custom_log(job_id int, args jsonb, extra text, runner NAME DEFAULT CURRENT_ROLE); +CREATE OR REPLACE FUNCTION custom_func(jobid int, args jsonb) RETURNS VOID LANGUAGE SQL AS +$$ + INSERT INTO custom_log VALUES($1, $2, 'custom_func'); +$$; +CREATE OR REPLACE FUNCTION custom_func_definer(jobid int, args jsonb) RETURNS VOID LANGUAGE SQL AS +$$ + INSERT INTO custom_log VALUES($1, $2, 'security definer'); +$$ SECURITY DEFINER; +CREATE OR REPLACE PROCEDURE custom_proc(job_id int, args jsonb) LANGUAGE SQL AS +$$ + INSERT INTO custom_log VALUES($1, $2, 'custom_proc'); +$$; +-- procedure with transaction handling +CREATE OR REPLACE PROCEDURE custom_proc2(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + INSERT INTO custom_log VALUES($1, $2, 'custom_proc2 1 COMMIT ' || (args->>'type')); + COMMIT; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc2 2 ROLLBACK ' || (args->>'type')); + ROLLBACK; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc2 3 COMMIT ' || (args->>'type')); + COMMIT; +END +$$; +\set ON_ERROR_STOP 0 +-- test bad input +SELECT add_job(NULL, '1h'); +ERROR: function or procedure cannot be NULL +SELECT add_job(0, '1h'); +ERROR: function or procedure with OID 0 does not exist +-- this will return an error about Oid 4294967295 +-- while regproc is unsigned int postgres has an implicit cast from int to regproc +SELECT add_job(-1, '1h'); +ERROR: function or procedure with OID 4294967295 does not exist +SELECT add_job('invalid_func', '1h'); +ERROR: function "invalid_func" does not exist at character 16 +SELECT add_job('custom_func', NULL); +ERROR: schedule interval cannot be NULL +SELECT add_job('custom_func', 'invalid interval'); +ERROR: invalid input syntax for type interval: "invalid interval" at character 31 +\set ON_ERROR_STOP 1 +select '2000-01-01 00:00:00+00' as time_zero \gset +SELECT add_job('custom_func','1h', config:='{"type":"function"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1000 +(1 row) + +SELECT add_job('custom_proc','1h', config:='{"type":"procedure"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1001 +(1 row) + +SELECT add_job('custom_proc2','1h', config:= '{"type":"procedure"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1002 +(1 row) + +SELECT add_job('custom_func', '1h', config:='{"type":"function"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1003 +(1 row) + +SELECT add_job('custom_func_definer', '1h', config:='{"type":"function"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1004 +(1 row) + +-- exclude the 
telemetry[1] and job error retention[2] jobs +-- job 2 may have already run which will set its next_start field thus making the test flaky +SELECT * FROM timescaledb_information.jobs WHERE job_id NOT IN (1,2) ORDER BY 1; + job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name +--------+----------------------------+-------------------+-------------+-------------+--------------+-------------+---------------------+-------------------+-----------+----------------+-----------------------+------------------------------+------------------------------+-------------------+-----------------+--------------+------------ + 1000 | User-Defined Action [1000] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_func | default_perm_user | t | t | {"type": "function"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1001 | User-Defined Action [1001] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_proc | default_perm_user | t | t | {"type": "procedure"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1002 | User-Defined Action [1002] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_proc2 | default_perm_user | t | t | {"type": "procedure"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1003 | User-Defined Action [1003] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_func | default_perm_user | t | t | {"type": "function"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1004 | User-Defined Action [1004] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_func_definer | default_perm_user | t | t | {"type": "function"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | +(5 rows) + +SELECT count(*) FROM _timescaledb_config.bgw_job WHERE config->>'type' IN ('procedure', 'function'); + count +------- + 5 +(1 row) + +\set ON_ERROR_STOP 0 +-- test bad input +CALL run_job(NULL); +ERROR: job ID cannot be NULL +CALL run_job(-1); +ERROR: job -1 not found +\set ON_ERROR_STOP 1 +CALL run_job(1000); +CALL run_job(1001); +CALL run_job(1002); +CALL run_job(1003); +CALL run_job(1004); +SELECT * FROM custom_log ORDER BY job_id, extra; + job_id | args | extra | runner +--------+-----------------------+---------------------------------+------------------- + 1000 | {"type": "function"} | custom_func | default_perm_user + 1001 | {"type": "procedure"} | custom_proc | default_perm_user + 1002 | {"type": "procedure"} | custom_proc2 1 COMMIT procedure | default_perm_user + 1002 | {"type": "procedure"} | custom_proc2 3 COMMIT procedure | default_perm_user + 1003 | {"type": "function"} | custom_func | default_perm_user + 1004 | {"type": "function"} | security definer | default_perm_user +(6 rows) + +\set ON_ERROR_STOP 0 +-- test bad input +SELECT delete_job(NULL); + delete_job +------------ + +(1 row) + +SELECT delete_job(-1); +ERROR: job -1 not found +\set ON_ERROR_STOP 1 +-- We keep job 1000 for some additional checks. 
+SELECT delete_job(1001); + delete_job +------------ + +(1 row) + +SELECT delete_job(1002); + delete_job +------------ + +(1 row) + +SELECT delete_job(1003); + delete_job +------------ + +(1 row) + +SELECT delete_job(1004); + delete_job +------------ + +(1 row) + +-- check jobs got removed +SELECT count(*) FROM timescaledb_information.jobs WHERE job_id >= 1001; + count +------- + 0 +(1 row) + +\c :TEST_DBNAME :ROLE_SUPERUSER +\set ON_ERROR_STOP 0 +-- test bad input +SELECT alter_job(NULL, if_exists => false); +ERROR: job ID cannot be NULL +SELECT alter_job(-1, if_exists => false); +ERROR: job -1 not found +\set ON_ERROR_STOP 1 +-- test bad input but don't fail +SELECT alter_job(NULL, if_exists => true); +NOTICE: job 0 not found, skipping + alter_job +----------- + +(1 row) + +SELECT alter_job(-1, if_exists => true); +NOTICE: job -1 not found, skipping + alter_job +----------- + +(1 row) + +-- test altering job with NULL config +SELECT job_id FROM alter_job(1000,scheduled:=false); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+---------------------- + f | {"type": "function"} +(1 row) + +-- test updating job settings +SELECT job_id FROM alter_job(1000,config:='{"test":"test"}'); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+------------------ + f | {"test": "test"} +(1 row) + +SELECT job_id FROM alter_job(1000,scheduled:=true); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+------------------ + t | {"test": "test"} +(1 row) + +SELECT job_id FROM alter_job(1000,scheduled:=false); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+------------------ + f | {"test": "test"} +(1 row) + +-- Done with job 1000 now, so remove it. 
+SELECT delete_job(1000); + delete_job +------------ + +(1 row) + +--test for #2793 +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +-- background workers are disabled, so the job will not run -- +SELECT add_job( proc=>'custom_func', + schedule_interval=>'1h', initial_start =>'2018-01-01 10:00:00-05') AS job_id_1 \gset +SELECT job_id, next_start, scheduled, schedule_interval +FROM timescaledb_information.jobs WHERE job_id > 1000; + job_id | next_start | scheduled | schedule_interval +--------+------------------------------+-----------+------------------- + 1005 | Mon Jan 01 07:00:00 2018 PST | t | @ 1 hour +(1 row) + +\x +SELECT * FROM timescaledb_information.job_stats WHERE job_id > 1000; +-[ RECORD 1 ]----------+----------------------------- +hypertable_schema | +hypertable_name | +job_id | 1005 +last_run_started_at | -infinity +last_successful_finish | -infinity +last_run_status | +job_status | Scheduled +last_run_duration | +next_start | Mon Jan 01 07:00:00 2018 PST +total_runs | 0 +total_successes | 0 +total_failures | 0 + +\x +SELECT delete_job(:job_id_1); + delete_job +------------ + +(1 row) + +-- tests for #3545 +CREATE FUNCTION wait_for_job_to_run(job_param_id INTEGER, expected_runs INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + r RECORD; +BEGIN + FOR i in 1..spins + LOOP + SELECT total_successes, total_failures FROM _timescaledb_internal.bgw_job_stat WHERE job_id=job_param_id INTO r; + IF (r.total_failures > 0) THEN + RAISE INFO 'wait_for_job_to_run: job execution failed'; + RETURN false; + ELSEIF (r.total_successes = expected_runs) THEN + RETURN true; + ELSEIF (r.total_successes > expected_runs) THEN + RAISE 'num_runs > expected'; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RAISE INFO 'wait_for_job_to_run: timeout after % tries', spins; + RETURN false; +END +$BODY$; +TRUNCATE custom_log; +-- Nested procedure call +CREATE OR REPLACE PROCEDURE custom_proc_nested(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + INSERT INTO custom_log VALUES($1, $2, 'custom_proc_nested 1 COMMIT'); + COMMIT; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc_nested 2 ROLLBACK'); + ROLLBACK; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc_nested 3 COMMIT'); + COMMIT; +END +$$; +CREATE OR REPLACE PROCEDURE custom_proc3(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + CALL custom_proc_nested(job_id, args); +END +$$; +CREATE OR REPLACE PROCEDURE custom_proc4(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + INSERT INTO custom_log VALUES($1, $2, 'custom_proc4 1 COMMIT'); + COMMIT; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc4 2 ROLLBACK'); + ROLLBACK; + RAISE EXCEPTION 'forced exception'; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc4 3 ABORT'); + COMMIT; +END +$$; +CREATE OR REPLACE PROCEDURE custom_proc5(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + CALL refresh_continuous_aggregate('conditions_summary_daily', '2021-08-01 00:00', '2021-08-31 00:00'); +END +$$; +-- Remove any default jobs, e.g., telemetry +\c :TEST_DBNAME :ROLE_SUPERUSER +TRUNCATE _timescaledb_config.bgw_job RESTART IDENTITY CASCADE; +NOTICE: truncate cascades to table "bgw_job_stat" +NOTICE: truncate cascades to table "bgw_policy_chunk_stats" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT add_job('custom_proc2', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_1 \gset +SELECT add_job('custom_proc3', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_2 \gset +\c 
:TEST_DBNAME :ROLE_SUPERUSER +-- Start Background Workers +SELECT _timescaledb_functions.start_background_workers(); + start_background_workers +-------------------------- + t +(1 row) + +-- Wait for jobs +SELECT wait_for_job_to_run(:job_id_1, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT wait_for_job_to_run(:job_id_2, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +-- Check results +SELECT * FROM custom_log ORDER BY job_id, extra; + job_id | args | extra | runner +--------+-----------------------+---------------------------------+------------------- + 1000 | {"type": "procedure"} | custom_proc2 1 COMMIT procedure | default_perm_user + 1000 | {"type": "procedure"} | custom_proc2 3 COMMIT procedure | default_perm_user + 1001 | {"type": "procedure"} | custom_proc_nested 1 COMMIT | default_perm_user + 1001 | {"type": "procedure"} | custom_proc_nested 3 COMMIT | default_perm_user +(4 rows) + +-- Delete previous jobs +SELECT delete_job(:job_id_1); + delete_job +------------ + +(1 row) + +SELECT delete_job(:job_id_2); + delete_job +------------ + +(1 row) + +TRUNCATE custom_log; +-- Forced Exception +SELECT add_job('custom_proc4', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_3 \gset +SELECT wait_for_job_to_run(:job_id_3, 1); +INFO: wait_for_job_to_run: job execution failed + wait_for_job_to_run +--------------------- + f +(1 row) + +-- Check results +SELECT * FROM custom_log ORDER BY job_id, extra; + job_id | args | extra | runner +--------+-----------------------+-----------------------+------------ + 1002 | {"type": "procedure"} | custom_proc4 1 COMMIT | super_user +(1 row) + +-- Delete previous jobs +SELECT delete_job(:job_id_3); + delete_job +------------ + +(1 row) + +CREATE TABLE conditions ( + time TIMESTAMP NOT NULL, + location TEXT NOT NULL, + location2 char(10) NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL +) WITH (autovacuum_enabled = FALSE); +SELECT create_hypertable('conditions', 'time', chunk_time_interval := '15 days'::interval); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_hypertable +------------------------- + (1,public,conditions,t) +(1 row) + +ALTER TABLE conditions + SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'location', + timescaledb.compress_orderby = 'time' +); +INSERT INTO conditions +SELECT generate_series('2021-08-01 00:00'::timestamp, '2021-08-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +-- Chunk compress stats +SELECT * FROM _timescaledb_internal.compressed_chunk_stats ORDER BY chunk_name; + hypertable_schema | hypertable_name | chunk_schema | chunk_name | compression_status | uncompressed_heap_size | uncompressed_index_size | uncompressed_toast_size | uncompressed_total_size | compressed_heap_size | compressed_index_size | compressed_toast_size | compressed_total_size +-------------------+-----------------+-----------------------+------------------+--------------------+------------------------+-------------------------+-------------------------+-------------------------+----------------------+-----------------------+-----------------------+----------------------- + public | conditions | _timescaledb_internal | _hyper_1_1_chunk | Uncompressed | | | | | | | | + public | conditions | _timescaledb_internal | _hyper_1_2_chunk | Uncompressed | | | | | | | | + public | conditions | _timescaledb_internal | _hyper_1_3_chunk | Uncompressed | | | | | | | | +(3 rows) + 
+-- Compression policy +SELECT add_compression_policy('conditions', interval '1 day') AS job_id_4 \gset +SELECT wait_for_job_to_run(:job_id_4, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +-- Chunk compress stats +SELECT * FROM _timescaledb_internal.compressed_chunk_stats ORDER BY chunk_name; + hypertable_schema | hypertable_name | chunk_schema | chunk_name | compression_status | uncompressed_heap_size | uncompressed_index_size | uncompressed_toast_size | uncompressed_total_size | compressed_heap_size | compressed_index_size | compressed_toast_size | compressed_total_size +-------------------+-----------------+-----------------------+------------------+--------------------+------------------------+-------------------------+-------------------------+-------------------------+----------------------+-----------------------+-----------------------+----------------------- + public | conditions | _timescaledb_internal | _hyper_1_1_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 16384 | 16384 | 8192 | 40960 + public | conditions | _timescaledb_internal | _hyper_1_2_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 16384 | 16384 | 8192 | 40960 + public | conditions | _timescaledb_internal | _hyper_1_3_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 16384 | 16384 | 8192 | 40960 +(3 rows) + +--TEST compression job after inserting data into previously compressed chunk +INSERT INTO conditions +SELECT generate_series('2021-08-01 00:00'::timestamp, '2021-08-31 00:00'::timestamp, '1 day'), 'NYC', 'nycity', 40, 40; +SELECT id, table_name, status from _timescaledb_catalog.chunk +where hypertable_id = (select id from _timescaledb_catalog.hypertable + where table_name = 'conditions') +order by id; + id | table_name | status +----+------------------+-------- + 1 | _hyper_1_1_chunk | 9 + 2 | _hyper_1_2_chunk | 9 + 3 | _hyper_1_3_chunk | 9 +(3 rows) + +--running job second time, wait for it to complete +select t.schedule_interval FROM alter_job(:job_id_4, next_start=> now() ) t; + schedule_interval +------------------- + @ 12 hours +(1 row) + +SELECT wait_for_job_to_run(:job_id_4, 2); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT id, table_name, status from _timescaledb_catalog.chunk +where hypertable_id = (select id from _timescaledb_catalog.hypertable + where table_name = 'conditions') +order by id; + id | table_name | status +----+------------------+-------- + 1 | _hyper_1_1_chunk | 1 + 2 | _hyper_1_2_chunk | 1 + 3 | _hyper_1_3_chunk | 1 +(3 rows) + +-- Drop the compression job +SELECT delete_job(:job_id_4); + delete_job +------------ + +(1 row) + +-- Decompress chunks before create the cagg +SELECT decompress_chunk(c) FROM show_chunks('conditions') c; + decompress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk +(3 rows) + +-- TEST Continuous Aggregate job +CREATE MATERIALIZED VIEW conditions_summary_daily +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature), + MAX(temperature), + MIN(temperature) +FROM conditions +GROUP BY location, bucket +WITH NO DATA; +-- Refresh Continous Aggregate by Job +SELECT add_job('custom_proc5', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_5 \gset +SELECT wait_for_job_to_run(:job_id_5, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT count(*) FROM 
conditions_summary_daily; + count +------- + 62 +(1 row) + +-- TESTs for alter_job_set_hypertable_id API +SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id_5, NULL); + alter_job_set_hypertable_id +----------------------------- + 1004 +(1 row) + +SELECT id, proc_name, hypertable_id +FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; + id | proc_name | hypertable_id +------+--------------+--------------- + 1004 | custom_proc5 | +(1 row) + +-- error case, try to associate with a PG relation +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id_5, 'custom_log'); +ERROR: relation "custom_log" is not a hypertable or continuous aggregate +\set ON_ERROR_STOP 1 +-- TEST associate the cagg with the job +SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id_5, 'conditions_summary_daily'::regclass); + alter_job_set_hypertable_id +----------------------------- + 1004 +(1 row) + +SELECT id, proc_name, hypertable_id +FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; + id | proc_name | hypertable_id +------+--------------+--------------- + 1004 | custom_proc5 | 3 +(1 row) + +--verify that job is dropped when cagg is dropped +DROP MATERIALIZED VIEW conditions_summary_daily; +NOTICE: drop cascades to table _timescaledb_internal._hyper_3_10_chunk +SELECT id, proc_name, hypertable_id +FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; + id | proc_name | hypertable_id +----+-----------+--------------- +(0 rows) + +-- Cleanup +DROP TABLE conditions; +DROP TABLE custom_log; +-- Stop Background Workers +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +SELECT _timescaledb_functions.restart_background_workers(); + restart_background_workers +---------------------------- + t +(1 row) + +\set ON_ERROR_STOP 0 +-- add test for custom jobs with custom check functions +-- create the functions/procedures to be used as checking functions +CREATE OR REPLACE PROCEDURE test_config_check_proc(config jsonb) +LANGUAGE PLPGSQL +AS $$ +DECLARE + drop_after interval; +BEGIN + SELECT jsonb_object_field_text (config, 'drop_after')::interval INTO STRICT drop_after; + IF drop_after IS NULL THEN + RAISE EXCEPTION 'Config must be not NULL and have drop_after'; + END IF ; +END +$$; +CREATE OR REPLACE FUNCTION test_config_check_func(config jsonb) RETURNS VOID +AS $$ +DECLARE + drop_after interval; +BEGIN + IF config IS NULL THEN + RETURN; + END IF; + SELECT jsonb_object_field_text (config, 'drop_after')::interval INTO STRICT drop_after; + IF drop_after IS NULL THEN + RAISE EXCEPTION 'Config can be NULL but must have drop_after if not'; + END IF ; +END +$$ LANGUAGE PLPGSQL; +-- step 2, create a procedure to run as a custom job +CREATE OR REPLACE PROCEDURE test_proc_with_check(job_id int, config jsonb) +LANGUAGE PLPGSQL +AS $$ +BEGIN + RAISE NOTICE 'Will only print this if config passes checks, my config is %', config; +END +$$; +-- step 3, add the job with the config check function passed as argument +-- test procedures, should get an unsupported error +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_proc'::regproc); +ERROR: unsupported function type +-- test functions +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func'::regproc); +ERROR: Config can be NULL but must have drop_after if not +select add_job('test_proc_with_check', '5 secs', config => NULL, check_config => 
'test_config_check_func'::regproc); + add_job +--------- + 1005 +(1 row) + +select add_job('test_proc_with_check', '5 secs', config => '{"drop_after": "chicken"}', check_config => 'test_config_check_func'::regproc); +ERROR: invalid input syntax for type interval: "chicken" +select add_job('test_proc_with_check', '5 secs', config => '{"drop_after": "2 weeks"}', check_config => 'test_config_check_func'::regproc) +as job_with_func_check_id \gset +--- test alter_job +select alter_job(:job_with_func_check_id, config => '{"drop_after":"chicken"}'); +ERROR: invalid input syntax for type interval: "chicken" +select config from alter_job(:job_with_func_check_id, config => '{"drop_after":"5 years"}'); + config +--------------------------- + {"drop_after": "5 years"} +(1 row) + +-- test that jobs with an incorrect check function signature will not be registered +-- these are all incorrect function signatures +CREATE OR REPLACE FUNCTION test_config_check_func_0args() RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'I take no arguments and will validate anything you give me!'; +END +$$ LANGUAGE PLPGSQL; +CREATE OR REPLACE FUNCTION test_config_check_func_2args(config jsonb, intarg int) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'I take two arguments (jsonb, int) and I should fail to run!'; +END +$$ LANGUAGE PLPGSQL; +CREATE OR REPLACE FUNCTION test_config_check_func_intarg(config int) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'I take one argument which is an integer and I should fail to run!'; +END +$$ LANGUAGE PLPGSQL; +-- -- this should fail, it has an incorrect check function +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_0args'::regproc); +ERROR: function or procedure public.test_config_check_func_0args(config jsonb) not found +-- -- so should this +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_2args'::regproc); +ERROR: function or procedure public.test_config_check_func_2args(config jsonb) not found +-- and this +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_intarg'::regproc); +ERROR: function or procedure public.test_config_check_func_intarg(config jsonb) not found +-- and this fails as it calls a nonexistent function +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_nonexistent_check_func'::regproc); +ERROR: function "test_nonexistent_check_func" does not exist at character 82 +-- when called with a valid check function and a NULL config no check should occur +CREATE OR REPLACE FUNCTION test_config_check_func(config jsonb) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'This message will get printed for both NULL and not NULL config'; +END +$$ LANGUAGE PLPGSQL; +SET client_min_messages = NOTICE; +-- check done for both NULL and non-NULL config +select add_job('test_proc_with_check', '5 secs', config => NULL, check_config => 'test_config_check_func'::regproc); +NOTICE: This message will get printed for both NULL and not NULL config + add_job +--------- + 1007 +(1 row) + +-- check done +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func'::regproc) as job_id \gset +NOTICE: This message will get printed for both NULL and not NULL config +-- check function not returning void +CREATE OR REPLACE FUNCTION test_config_check_func_returns_int(config jsonb) RETURNS INT +AS $$ +BEGIN + raise notice 'I print a message, and then I return least(1,2)'; + RETURN 
LEAST(1, 2); +END +$$ LANGUAGE PLPGSQL; +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_returns_int'::regproc, +initial_start => :'time_zero'::timestamptz) as job_id_int \gset +NOTICE: I print a message, and then I return least(1,2) +-- drop the registered check function, verify that alter_job will work and print a warning that +-- the check is being skipped due to the check function missing +ALTER FUNCTION test_config_check_func RENAME TO renamed_func; +select job_id, schedule_interval, config, check_config from alter_job(:job_id, schedule_interval => '1 hour'); +WARNING: function public.test_config_check_func(config jsonb) not found, skipping config validation for job 1008 + job_id | schedule_interval | config | check_config +--------+-------------------+--------+------------------------------- + 1008 | @ 1 hour | {} | public.test_config_check_func +(1 row) + +DROP FUNCTION test_config_check_func_returns_int; +select job_id, schedule_interval, config, check_config from alter_job(:job_id_int, config => '{"field":"value"}'); +WARNING: function public.test_config_check_func_returns_int(config jsonb) not found, skipping config validation for job 1009 + job_id | schedule_interval | config | check_config +--------+-------------------+--------------------+------------------------------------------- + 1009 | @ 5 secs | {"field": "value"} | public.test_config_check_func_returns_int +(1 row) + +-- rename the check function and then call alter_job to register the new name +select job_id, schedule_interval, config, check_config from alter_job(:job_id, check_config => 'renamed_func'::regproc); +NOTICE: This message will get printed for both NULL and not NULL config + job_id | schedule_interval | config | check_config +--------+-------------------+--------+--------------------- + 1008 | @ 1 hour | {} | public.renamed_func +(1 row) + +-- run alter again, should get a config check +select job_id, schedule_interval, config, check_config from alter_job(:job_id, config => '{}'); +NOTICE: This message will get printed for both NULL and not NULL config + job_id | schedule_interval | config | check_config +--------+-------------------+--------+--------------------- + 1008 | @ 1 hour | {} | public.renamed_func +(1 row) + +-- do not drop the current check function but register a new one +CREATE OR REPLACE FUNCTION substitute_check_func(config jsonb) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'This message is a substitute of the previously printed one'; +END +$$ LANGUAGE PLPGSQL; +-- register the new check +select job_id, schedule_interval, config, check_config from alter_job(:job_id, check_config => 'substitute_check_func'); +NOTICE: This message is a substitute of the previously printed one + job_id | schedule_interval | config | check_config +--------+-------------------+--------+------------------------------ + 1008 | @ 1 hour | {} | public.substitute_check_func +(1 row) + +select job_id, schedule_interval, config, check_config from alter_job(:job_id, config => '{}'); +NOTICE: This message is a substitute of the previously printed one + job_id | schedule_interval | config | check_config +--------+-------------------+--------+------------------------------ + 1008 | @ 1 hour | {} | public.substitute_check_func +(1 row) + +RESET client_min_messages; +-- test an oid that doesn't exist +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 17424217::regproc); +ERROR: function with OID 17424217 does not exist +\c :TEST_DBNAME 
:ROLE_SUPERUSER +-- test a function with insufficient privileges +create schema test_schema; +create role user_noexec with login; +grant usage on schema test_schema to user_noexec; +CREATE OR REPLACE FUNCTION test_schema.test_config_check_func_privileges(config jsonb) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'This message will only get printed if privileges suffice'; +END +$$ LANGUAGE PLPGSQL; +revoke execute on function test_schema.test_config_check_func_privileges from public; +-- verify the user doesn't have execute permissions on the function +select has_function_privilege('user_noexec', 'test_schema.test_config_check_func_privileges(jsonb)', 'execute'); + has_function_privilege +------------------------ + f +(1 row) + +\c :TEST_DBNAME user_noexec +-- user_noexec should not have exec permissions on this function +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_schema.test_config_check_func_privileges'::regproc); +ERROR: permission denied for function "test_config_check_func_privileges" +\c :TEST_DBNAME :ROLE_SUPERUSER +-- check that alter_job rejects a check function with invalid signature +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'renamed_func', +initial_start => :'time_zero'::timestamptz) as job_id_alter \gset +NOTICE: This message will get printed for both NULL and not NULL config +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter, check_config => 'test_config_check_func_0args'); +ERROR: function or procedure public.test_config_check_func_0args(config jsonb) not found +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter); +NOTICE: This message will get printed for both NULL and not NULL config + job_id | schedule_interval | config | check_config +--------+-------------------+--------+--------------------- + 1010 | @ 5 secs | {} | public.renamed_func +(1 row) + +-- test that we can unregister the check function +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter, check_config => 0); + job_id | schedule_interval | config | check_config +--------+-------------------+--------+-------------- + 1010 | @ 5 secs | {} | +(1 row) + +-- no message printed now +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter, config => '{}'); + job_id | schedule_interval | config | check_config +--------+-------------------+--------+-------------- + 1010 | @ 5 secs | {} | +(1 row) + +-- test the case where we have a background job that registers jobs with a check fn +CREATE OR REPLACE PROCEDURE add_scheduled_jobs_with_check(job_id int, config jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + perform add_job('test_proc_with_check', schedule_interval => '10 secs', config => '{}', check_config => 'renamed_func'); +END +$$; +select add_job('add_scheduled_jobs_with_check', schedule_interval => '1 hour') as last_job_id \gset +-- wait for enough time +SELECT wait_for_job_to_run(:last_job_id, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +select total_runs, total_successes, last_run_status from timescaledb_information.job_stats where job_id = :last_job_id; + total_runs | total_successes | last_run_status +------------+-----------------+----------------- + 1 | 1 | Success +(1 row) + +-- test coverage for alter_job +-- registering an invalid oid +select alter_job(:job_id_alter, check_config => 123456789::regproc); +ERROR: function with OID 123456789 does not exist +-- registering a function with 
insufficient privileges +\c :TEST_DBNAME user_noexec +select * from add_job('test_proc_with_check', '5 secs', config => '{}') as job_id_owner \gset +select * from alter_job(:job_id_owner, check_config => 'test_schema.test_config_check_func_privileges'::regproc); +ERROR: permission denied for function "test_config_check_func_privileges" +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP SCHEMA test_schema CASCADE; +NOTICE: drop cascades to function test_schema.test_config_check_func_privileges(jsonb) +-- Delete all jobs with that owner before we can drop the user. +DELETE FROM _timescaledb_config.bgw_job WHERE owner = 'user_noexec'::regrole; +DROP ROLE user_noexec; +-- test with aggregate check proc +create function jsonb_add (j1 jsonb, j2 jsonb) returns jsonb +AS $$ +BEGIN + RETURN j1 || j2; +END +$$ LANGUAGE PLPGSQL; +CREATE AGGREGATE sum_jsb (jsonb) +( + sfunc = jsonb_add, + stype = jsonb, + initcond = '{}' +); +-- for test coverage, check unsupported aggregate type +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'sum_jsb'::regproc); +ERROR: unsupported function type +-- Cleanup jobs +TRUNCATE _timescaledb_config.bgw_job CASCADE; +NOTICE: truncate cascades to table "bgw_job_stat" +NOTICE: truncate cascades to table "bgw_policy_chunk_stats" +-- github issue 4610 +CREATE TABLE sensor_data +( + time timestamptz not null, + sensor_id integer not null, + cpu double precision null, + temperature double precision null +); +SELECT FROM create_hypertable('sensor_data','time'); +-- +(1 row) + +SELECT '2022-10-06 00:00:00+00' as start_date_sd \gset +INSERT INTO sensor_data + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date_sd'::timestamptz - INTERVAL '1 months', :'start_date_sd'::timestamptz - INTERVAL '1 week', INTERVAL '1 minute') AS g1(time), + generate_series(1, 50, 1 ) AS g2(sensor_id) + ORDER BY + time; +-- enable compression +ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_orderby = 'time DESC'); +-- create new chunks +INSERT INTO sensor_data + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date_sd'::timestamptz - INTERVAL '2 months', :'start_date_sd'::timestamptz - INTERVAL '2 week', INTERVAL '2 minute') AS g1(time), + generate_series(1, 30, 1 ) AS g2(sensor_id) + ORDER BY + time; +-- get the name of a new uncompressed chunk +SELECT chunk_name AS new_uncompressed_chunk_name + FROM timescaledb_information.chunks + WHERE hypertable_name = 'sensor_data' AND NOT is_compressed LIMIT 1 \gset +-- change compression status so that this chunk is skipped when policy is run +update _timescaledb_catalog.chunk set status=3 where table_name = :'new_uncompressed_chunk_name'; +-- add new compression policy job +SELECT add_compression_policy('sensor_data', INTERVAL '1' minute) AS compressjob_id \gset +-- set recompress to true +SELECT alter_job(id,config:=jsonb_set(config,'{recompress}', 'true')) FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + alter_job +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1014,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""recompress"": true, ""hypertable_id"": 4, ""compress_after"": ""@ 1 min""}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +-- 
verify that there are other uncompressed new chunks that need to be compressed +SELECT count(*) > 1 + FROM timescaledb_information.chunks + WHERE hypertable_name = 'sensor_data' AND NOT is_compressed; + ?column? +---------- + t +(1 row) + +-- disable notice/warning as the new_uncompressed_chunk_name +-- is dynamic and it will be printed in those messages. +SET client_min_messages TO ERROR; +CALL run_job(:compressjob_id); +SET client_min_messages TO NOTICE; +-- check compression status is not changed for the chunk whose status was manually updated +SELECT status FROM _timescaledb_catalog.chunk where table_name = :'new_uncompressed_chunk_name'; + status +-------- + 3 +(1 row) + +-- confirm all the other new chunks are now compressed despite +-- facing an error when trying to compress :'new_uncompressed_chunk_name' +SELECT count(*) = 0 + FROM timescaledb_information.chunks + WHERE hypertable_name = 'sensor_data' AND NOT is_compressed; + ?column? +---------- + t +(1 row) + +-- cleanup +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +DROP TABLE sensor_data; +SELECT _timescaledb_functions.restart_background_workers(); + restart_background_workers +---------------------------- + t +(1 row) + +-- Github issue #5537 +-- Proc that waits until the given job enters the expected state +CREATE OR REPLACE PROCEDURE wait_for_job_status(job_param_id INTEGER, expected_status TEXT, spins INTEGER=:TEST_SPINWAIT_ITERS) +LANGUAGE PLPGSQL AS $$ +DECLARE + jobstatus TEXT; +BEGIN + FOR i in 1..spins + LOOP + SELECT job_status FROM timescaledb_information.job_stats WHERE job_id = job_param_id INTO jobstatus; + IF jobstatus = expected_status THEN + RETURN; + END IF; + PERFORM pg_sleep(0.1); + ROLLBACK; + END LOOP; + RAISE EXCEPTION 'wait_for_job_status(%): timeout after % tries', job_param_id, spins; +END; +$$; +-- Proc that sleeps for 1m - to keep the test jobs in running state +CREATE OR REPLACE PROCEDURE proc_that_sleeps(job_id INT, config JSONB) +LANGUAGE PLPGSQL AS +$$ +BEGIN + PERFORM pg_sleep(60); +END +$$; +-- create new jobs and ensure that the second one gets scheduled +-- before the first one by adjusting the initial_start values +SELECT add_job('proc_that_sleeps', '1h', initial_start => now()::timestamptz + interval '2s') AS job_id_1 \gset +SELECT add_job('proc_that_sleeps', '1h', initial_start => now()::timestamptz - interval '2s') AS job_id_2 \gset +-- wait for the jobs to start running job_2 will start running first +CALL wait_for_job_status(:job_id_2, 'Running'); +CALL wait_for_job_status(:job_id_1, 'Running'); +-- add a new job and wait for it to start +SELECT add_job('proc_that_sleeps', '1h') AS job_id_3 \gset +CALL wait_for_job_status(:job_id_3, 'Running'); +-- verify that none of the jobs crashed +SELECT job_id, job_status, next_start, + total_runs, total_successes, total_failures + FROM timescaledb_information.job_stats + WHERE job_id IN (:job_id_1, :job_id_2, :job_id_3) + ORDER BY job_id; + job_id | job_status | next_start | total_runs | total_successes | total_failures +--------+------------+------------+------------+-----------------+---------------- + 1015 | Running | -infinity | 1 | 0 | 0 + 1016 | Running | -infinity | 1 | 0 | 0 + 1017 | Running | -infinity | 1 | 0 | 0 +(3 rows) + +SELECT job_id, err_message + FROM timescaledb_information.job_errors + WHERE job_id IN (:job_id_1, :job_id_2, :job_id_3); + job_id | err_message +--------+------------- +(0 rows) + +-- cleanup +SELECT 
_timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +CALL wait_for_job_status(:job_id_1, 'Scheduled'); +CALL wait_for_job_status(:job_id_2, 'Scheduled'); +CALL wait_for_job_status(:job_id_3, 'Scheduled'); +SELECT delete_job(:job_id_1); + delete_job +------------ + +(1 row) + +SELECT delete_job(:job_id_2); + delete_job +------------ + +(1 row) + +SELECT delete_job(:job_id_3); + delete_job +------------ + +(1 row) + diff --git a/tsl/test/expected/compression.out b/tsl/test/expected/compression.out index 6319d7ff549..a47825e9a0b 100644 --- a/tsl/test/expected/compression.out +++ b/tsl/test/expected/compression.out @@ -108,10 +108,10 @@ before_compression_table_bytes | 8192 before_compression_index_bytes | 32768 before_compression_toast_bytes | 0 before_compression_total_bytes | 40960 -after_compression_table_bytes | 8192 +after_compression_table_bytes | 16384 after_compression_index_bytes | 16384 after_compression_toast_bytes | 8192 -after_compression_total_bytes | 32768 +after_compression_total_bytes | 40960 node_name | \x @@ -124,28 +124,30 @@ select compress_chunk( '_timescaledb_internal._hyper_1_1_chunk'); \x select * from _timescaledb_catalog.compression_chunk_size order by chunk_id; --[ RECORD 1 ]------------+------ -chunk_id | 1 -compressed_chunk_id | 6 -uncompressed_heap_size | 8192 -uncompressed_toast_size | 0 -uncompressed_index_size | 32768 -compressed_heap_size | 8192 -compressed_toast_size | 8192 -compressed_index_size | 16384 -numrows_pre_compression | 1 -numrows_post_compression | 1 --[ RECORD 2 ]------------+------ -chunk_id | 2 -compressed_chunk_id | 5 -uncompressed_heap_size | 8192 -uncompressed_toast_size | 0 -uncompressed_index_size | 32768 -compressed_heap_size | 8192 -compressed_toast_size | 8192 -compressed_index_size | 16384 -numrows_pre_compression | 1 -numrows_post_compression | 1 +-[ RECORD 1 ]--------------+------ +chunk_id | 1 +compressed_chunk_id | 6 +uncompressed_heap_size | 8192 +uncompressed_toast_size | 0 +uncompressed_index_size | 32768 +compressed_heap_size | 16384 +compressed_toast_size | 8192 +compressed_index_size | 16384 +numrows_pre_compression | 1 +numrows_post_compression | 1 +numrows_frozen_immediately | 1 +-[ RECORD 2 ]--------------+------ +chunk_id | 2 +compressed_chunk_id | 5 +uncompressed_heap_size | 8192 +uncompressed_toast_size | 0 +uncompressed_index_size | 32768 +compressed_heap_size | 16384 +compressed_toast_size | 8192 +compressed_index_size | 16384 +numrows_pre_compression | 1 +numrows_post_compression | 1 +numrows_frozen_immediately | 1 \x select ch1.id, ch1.schema_name, ch1.table_name , ch2.table_name as compress_table @@ -393,10 +395,10 @@ before_compression_table_bytes | 8192 before_compression_index_bytes | 16384 before_compression_toast_bytes | 8192 before_compression_total_bytes | 32768 -after_compression_table_bytes | 8192 +after_compression_table_bytes | 16384 after_compression_index_bytes | 16384 after_compression_toast_bytes | 8192 -after_compression_total_bytes | 32768 +after_compression_total_bytes | 40960 node_name | -[ RECORD 2 ]------------------+---------------------- chunk_schema | _timescaledb_internal @@ -406,10 +408,10 @@ before_compression_table_bytes | 8192 before_compression_index_bytes | 16384 before_compression_toast_bytes | 8192 before_compression_total_bytes | 32768 -after_compression_table_bytes | 8192 +after_compression_table_bytes | 16384 after_compression_index_bytes | 16384 after_compression_toast_bytes | 8192 -after_compression_total_bytes 
| 32768 +after_compression_total_bytes | 40960 node_name | select * from hypertable_compression_stats('foo'); @@ -420,10 +422,10 @@ before_compression_table_bytes | 8192 before_compression_index_bytes | 32768 before_compression_toast_bytes | 0 before_compression_total_bytes | 40960 -after_compression_table_bytes | 8192 +after_compression_table_bytes | 16384 after_compression_index_bytes | 16384 after_compression_toast_bytes | 8192 -after_compression_total_bytes | 32768 +after_compression_total_bytes | 40960 node_name | select * from hypertable_compression_stats('conditions'); @@ -434,10 +436,10 @@ before_compression_table_bytes | 16384 before_compression_index_bytes | 32768 before_compression_toast_bytes | 16384 before_compression_total_bytes | 65536 -after_compression_table_bytes | 16384 +after_compression_table_bytes | 32768 after_compression_index_bytes | 32768 after_compression_toast_bytes | 16384 -after_compression_total_bytes | 65536 +after_compression_total_bytes | 81920 node_name | vacuum full foo; diff --git a/tsl/test/expected/compression_bgw.out b/tsl/test/expected/compression_bgw-13.out similarity index 100% rename from tsl/test/expected/compression_bgw.out rename to tsl/test/expected/compression_bgw-13.out diff --git a/tsl/test/expected/compression_bgw-14.out b/tsl/test/expected/compression_bgw-14.out new file mode 100644 index 00000000000..6470ec0e451 --- /dev/null +++ b/tsl/test/expected/compression_bgw-14.out @@ -0,0 +1,657 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE ROLE NOLOGIN_ROLE WITH nologin noinherit; +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO NOLOGIN_ROLE; +GRANT NOLOGIN_ROLE TO :ROLE_DEFAULT_PERM_USER WITH ADMIN OPTION; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + location2 char(10) NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select create_hypertable( 'conditions', 'time', chunk_time_interval=> '31days'::interval); + create_hypertable +------------------------- + (1,public,conditions,t) +(1 row) + +--TEST 1-- +--cannot set policy without enabling compression -- +\set ON_ERROR_STOP 0 +select add_compression_policy('conditions', '60d'::interval); +ERROR: compression not enabled on hypertable "conditions" +\set ON_ERROR_STOP 1 +-- TEST2 -- +--add a policy to compress chunks -- +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'time'); +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +select add_compression_policy('conditions', '60d'::interval) AS compressjob_id +\gset +select * from _timescaledb_config.bgw_job where id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone 
+------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------+------------------------+--------------------------+---------- + 1000 | Compression Policy [1000] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days"} | _timescaledb_functions | policy_compression_check | +(1 row) + +select * from alter_job(:compressjob_id, schedule_interval=>'1s'); + job_id | schedule_interval | max_runtime | max_retries | retry_period | scheduled | config | next_start | check_config | fixed_schedule | initial_start | timezone +--------+-------------------+-------------+-------------+--------------+-----------+-----------------------------------------------------+------------+-------------------------------------------------+----------------+---------------+---------- + 1000 | @ 1 sec | @ 0 | -1 | @ 1 hour | t | {"hypertable_id": 1, "compress_after": "@ 60 days"} | -infinity | _timescaledb_functions.policy_compression_check | f | | +(1 row) + +--enable maxchunks to 1 so that only 1 chunk is compressed by the job +SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1')) + FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + (1000,"@ 1 sec","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 1, ""compress_after"": ""@ 60 days"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +select * from _timescaledb_config.bgw_job where id >= 1000 ORDER BY id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+--------------------------+---------- + 1000 | Compression Policy [1000] | @ 1 sec | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days", "maxchunks_to_compress": 1} | _timescaledb_functions | policy_compression_check | +(1 row) + +insert into conditions +select now()::timestamp, 'TOK', 'sony', 55, 75; +-- TEST3 -- +--only the old chunks will get compressed when policy is executed-- +CALL run_job(:compressjob_id); +select chunk_name, pg_size_pretty(before_compression_total_bytes) before_total, +pg_size_pretty( after_compression_total_bytes) after_total +from chunk_compression_stats('conditions') where compression_status like 'Compressed' order by chunk_name; + chunk_name | before_total | after_total +------------------+--------------+------------- + _hyper_1_1_chunk | 32 kB | 40 kB +(1 row) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM 
_timescaledb_catalog.chunk ORDER BY id; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+--------------------------+---------------------+---------+--------+----------- + 1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | 4 | f | 1 | f + 2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | | f | 0 | f + 3 | 1 | _timescaledb_internal | _hyper_1_3_chunk | | f | 0 | f + 4 | 2 | _timescaledb_internal | compress_hyper_2_4_chunk | | f | 0 | f +(4 rows) + +-- TEST 4 -- +--cannot set another policy +\set ON_ERROR_STOP 0 +select add_compression_policy('conditions', '60d'::interval, if_not_exists=>true); +NOTICE: compression policy already exists for hypertable "conditions", skipping + add_compression_policy +------------------------ + -1 +(1 row) + +select add_compression_policy('conditions', '60d'::interval); +ERROR: compression policy already exists for hypertable or continuous aggregate "conditions" +select add_compression_policy('conditions', '30d'::interval, if_not_exists=>true); +WARNING: compression policy already exists for hypertable "conditions" + add_compression_policy +------------------------ + -1 +(1 row) + +\set ON_ERROR_STOP 1 +--TEST 5 -- +-- drop the policy -- +select remove_compression_policy('conditions'); + remove_compression_policy +--------------------------- + t +(1 row) + +select count(*) from _timescaledb_config.bgw_job WHERE id>=1000; + count +------- + 0 +(1 row) + +--TEST 6 -- +-- try to execute the policy after it has been dropped -- +\set ON_ERROR_STOP 0 +CALL run_job(:compressjob_id); +ERROR: job 1000 not found +--errors with bad input for add/remove compression policy +create view dummyv1 as select * from conditions limit 1; +select add_compression_policy( 100 , compress_after=> '1 day'::interval); +ERROR: object with id "100" not found +select add_compression_policy( 'dummyv1', compress_after=> '1 day'::interval ); +ERROR: "dummyv1" is not a hypertable or a continuous aggregate +select remove_compression_policy( 100 ); +ERROR: relation is not a hypertable or continuous aggregate +\set ON_ERROR_STOP 1 +-- We're done with the table, so drop it. 
+DROP TABLE IF EXISTS conditions CASCADE; +NOTICE: drop cascades to table _timescaledb_internal.compress_hyper_2_4_chunk +NOTICE: drop cascades to view dummyv1 +--TEST 7 +--compression policy for smallint, integer or bigint based partition hypertable +--smallint test +CREATE TABLE test_table_smallint(time SMALLINT, val SMALLINT); +SELECT create_hypertable('test_table_smallint', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------- + (3,public,test_table_smallint,t) +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now_smallint() RETURNS SMALLINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::SMALLINT'; +SELECT set_integer_now_func('test_table_smallint', 'dummy_now_smallint'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_smallint SELECT generate_series(1,5), 10; +ALTER TABLE test_table_smallint SET (timescaledb.compress); +\set ON_ERROR_STOP 0 +select add_compression_policy( 'test_table_smallint', compress_after=> '1 day'::interval ); +ERROR: unsupported compress_after argument type, expected type : smallint +\set ON_ERROR_STOP 1 +SELECT add_compression_policy('test_table_smallint', 2::SMALLINT) AS compressjob_id \gset +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+---------- + 1001 | Compression Policy [1001] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 2} | _timescaledb_functions | policy_compression_check | +(1 row) + +--will compress all chunks that need compression +CALL run_job(:compressjob_id); +SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes +FROM chunk_compression_stats('test_table_smallint') +WHERE compression_status LIKE 'Compressed' +ORDER BY chunk_name; + chunk_name | before_compression_total_bytes | after_compression_total_bytes +------------------+--------------------------------+------------------------------- + _hyper_3_5_chunk | 24576 | 24576 + _hyper_3_6_chunk | 24576 | 24576 +(2 rows) + +--integer tests +CREATE TABLE test_table_integer(time INTEGER, val INTEGER); +SELECT create_hypertable('test_table_integer', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------------- + (5,public,test_table_integer,t) +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now_integer() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS 'SELECT 5::INTEGER'; +SELECT set_integer_now_func('test_table_integer', 'dummy_now_integer'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_integer SELECT generate_series(1,5), 10; +ALTER TABLE test_table_integer SET (timescaledb.compress); +SELECT add_compression_policy('test_table_integer', 2::INTEGER) AS compressjob_id \gset +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + id | application_name | schedule_interval 
| max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+---------- + 1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 5 | {"hypertable_id": 5, "compress_after": 2} | _timescaledb_functions | policy_compression_check | +(1 row) + +--will compress all chunks that need compression +CALL run_job(:compressjob_id); +SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes +FROM chunk_compression_stats('test_table_integer') +WHERE compression_status LIKE 'Compressed' +ORDER BY chunk_name; + chunk_name | before_compression_total_bytes | after_compression_total_bytes +-------------------+--------------------------------+------------------------------- + _hyper_5_12_chunk | 24576 | 24576 + _hyper_5_13_chunk | 24576 | 24576 +(2 rows) + +--bigint test +CREATE TABLE test_table_bigint(time BIGINT, val BIGINT); +SELECT create_hypertable('test_table_bigint', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------------------- + (7,public,test_table_bigint,t) +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now_bigint() RETURNS BIGINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::BIGINT'; +SELECT set_integer_now_func('test_table_bigint', 'dummy_now_bigint'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_bigint SELECT generate_series(1,5), 10; +ALTER TABLE test_table_bigint SET (timescaledb.compress); +SELECT add_compression_policy('test_table_bigint', 2::BIGINT) AS compressjob_id \gset +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+---------- + 1003 | Compression Policy [1003] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 7 | {"hypertable_id": 7, "compress_after": 2} | _timescaledb_functions | policy_compression_check | +(1 row) + +--will compress all chunks that need compression +CALL run_job(:compressjob_id); +SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes +FROM chunk_compression_stats('test_table_bigint') +WHERE compression_status LIKE 'Compressed' +ORDER BY chunk_name; + chunk_name | before_compression_total_bytes | after_compression_total_bytes +-------------------+--------------------------------+------------------------------- + _hyper_7_19_chunk | 24576 | 24576 + _hyper_7_20_chunk | 24576 | 24576 +(2 rows) + +--TEST 8 +--hypertable owner lacks permission to start background worker 
+SET ROLE NOLOGIN_ROLE; +CREATE TABLE test_table_nologin(time bigint, val int); +SELECT create_hypertable('test_table_nologin', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------------- + (9,public,test_table_nologin,t) +(1 row) + +SELECT set_integer_now_func('test_table_nologin', 'dummy_now_bigint'); + set_integer_now_func +---------------------- + +(1 row) + +ALTER TABLE test_table_nologin set (timescaledb.compress); +\set ON_ERROR_STOP 0 +SELECT add_compression_policy('test_table_nologin', 2::int); +ERROR: permission denied to start background process as role "nologin_role" +\set ON_ERROR_STOP 1 +DROP TABLE test_table_nologin; +RESET ROLE; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE conditions( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temperature FLOAT +); +SELECT * FROM create_hypertable('conditions', 'time', + chunk_time_interval => '1 day'::interval); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 11 | public | conditions | t +(1 row) + +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '10 min') AS time; +CREATE MATERIALIZED VIEW conditions_summary +WITH (timescaledb.continuous) AS +SELECT device, + time_bucket(INTERVAL '1 hour', "time") AS day, + AVG(temperature) AS avg_temperature, + MAX(temperature) AS max_temperature, + MIN(temperature) AS min_temperature +FROM conditions +GROUP BY device, time_bucket(INTERVAL '1 hour', "time") WITH NO DATA; +CALL refresh_continuous_aggregate('conditions_summary', NULL, NULL); +ALTER TABLE conditions SET (timescaledb.compress); +SELECT COUNT(*) AS dropped_chunks_count + FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00'); + dropped_chunks_count +---------------------- + 14 +(1 row) + +-- We need to have some chunks that are marked as dropped, otherwise +-- we will not have a problem below. 
+SELECT COUNT(*) AS dropped_chunks_count + FROM _timescaledb_catalog.chunk + WHERE dropped = TRUE; + dropped_chunks_count +---------------------- + 14 +(1 row) + +SELECT count(*) FROM timescaledb_information.chunks +WHERE hypertable_name = 'conditions' and is_compressed = true; + count +------- + 0 +(1 row) + +SELECT add_compression_policy AS job_id + FROM add_compression_policy('conditions', INTERVAL '1 day') \gset +-- job compresses only 1 chunk at a time -- +SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1')) + FROM _timescaledb_config.bgw_job WHERE id = :job_id; + alter_job +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +SELECT alter_job(id,config:=jsonb_set(config,'{verbose_log}', 'true')) + FROM _timescaledb_config.bgw_job WHERE id = :job_id; + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""verbose_log"": true, ""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +set client_min_messages TO LOG; +CALL run_job(:job_id); +LOG: statement: CALL run_job(1004); +LOG: job 1004 completed processing chunk _timescaledb_internal._hyper_11_40_chunk +set client_min_messages TO NOTICE; +LOG: statement: set client_min_messages TO NOTICE; +SELECT count(*) FROM timescaledb_information.chunks +WHERE hypertable_name = 'conditions' and is_compressed = true; + count +------- + 1 +(1 row) + +\i include/recompress_basic.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE OR REPLACE VIEW compressed_chunk_info_view AS +SELECT + h.schema_name AS hypertable_schema, + h.table_name AS hypertable_name, + c.schema_name as chunk_schema, + c.table_name as chunk_name, + c.status as chunk_status, + comp.schema_name as compressed_chunk_schema, + comp.table_name as compressed_chunk_name +FROM + _timescaledb_catalog.hypertable h JOIN + _timescaledb_catalog.chunk c ON h.id = c.hypertable_id + LEFT JOIN _timescaledb_catalog.chunk comp +ON comp.id = c.compressed_chunk_id +; +CREATE TABLE test2 (timec timestamptz NOT NULL, i integer , + b bigint, t text); +SELECT table_name from create_hypertable('test2', 'timec', chunk_time_interval=> INTERVAL '7 days'); + table_name +------------ + test2 +(1 row) + +INSERT INTO test2 SELECT q, 10, 11, 'hello' FROM generate_series( '2020-01-03 10:00:00+00', '2020-01-03 12:00:00+00' , '5 min'::interval) q; +ALTER TABLE test2 set (timescaledb.compress, +timescaledb.compress_segmentby = 'b', +timescaledb.compress_orderby = 'timec DESC'); +SELECT compress_chunk(c) +FROM show_chunks('test2') c; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_14_62_chunk +(1 row) + +---insert into the middle of the range --- +INSERT INTO test2 values ( '2020-01-03 10:01:00+00', 20, 11, '2row'); +INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 11, '3row'); +INSERT INTO test2 values ( '2020-01-03 12:01:00+00', 20, 11, '4row'); +--- insert a new segment by --- +INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 12, '12row'); +SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*) +FROM test2 +GROUP BY time_bucket(INTERVAL '2 hour', timec), b +ORDER BY 1, 2; + time_bucket | b | count +------------------------------+----+------- + Fri Jan 03 02:00:00 2020 PST | 11 | 26 + Fri Jan 03 02:00:00 2020 PST | 12 | 1 + Fri Jan 03 04:00:00 2020 PST | 11 | 2 +(3 rows) + +--check status for chunk -- +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 9 | _hyper_14_62_chunk +(1 row) + +SELECT compressed_chunk_schema || '.' || compressed_chunk_name as "COMP_CHUNK_NAME", + chunk_schema || '.' || chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' \gset +SELECT count(*) from test2; + count +------- + 29 +(1 row) + +-- call recompress_chunk inside a transaction. This should fails since +-- it contains transaction-terminating commands. +\set ON_ERROR_STOP 0 +START TRANSACTION; +CALL recompress_chunk(:'CHUNK_NAME'::regclass); +ROLLBACK; +\set ON_ERROR_STOP 1 +CALL recompress_chunk(:'CHUNK_NAME'::regclass); +-- Demonstrate that no locks are held on the hypertable, chunk, or the +-- compressed chunk after recompress_chunk has executed. 
+SELECT pid, locktype, relation, relation::regclass, mode, granted +FROM pg_locks +WHERE relation::regclass::text IN (:'CHUNK_NAME', :'COMP_CHUNK_NAME', 'test2') +ORDER BY pid; + pid | locktype | relation | relation | mode | granted +-----+----------+----------+----------+------+--------- +(0 rows) + +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_14_62_chunk +(1 row) + +--- insert into a compressed chunk again + a new chunk-- +INSERT INTO test2 values ( '2020-01-03 11:01:03+00', 20, 11, '33row'), + ( '2020-01-03 11:01:06+00', 20, 11, '36row'), + ( '2020-01-03 11:02:00+00', 20, 12, '12row'), + ( '2020-04-03 00:02:00+00', 30, 13, '3013row'); +SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*) +FROM test2 +GROUP BY time_bucket(INTERVAL '2 hour', timec), b +ORDER BY 1, 2; + time_bucket | b | count +------------------------------+----+------- + Fri Jan 03 02:00:00 2020 PST | 11 | 28 + Fri Jan 03 02:00:00 2020 PST | 12 | 2 + Fri Jan 03 04:00:00 2020 PST | 11 | 2 + Thu Apr 02 17:00:00 2020 PDT | 13 | 1 +(4 rows) + +--chunk status should be unordered for the previously compressed chunk +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 9 | _hyper_14_62_chunk + 0 | _hyper_14_64_chunk +(2 rows) + +SELECT add_compression_policy AS job_id + FROM add_compression_policy('test2', '30d'::interval) \gset +CALL run_job(:job_id); +CALL run_job(:job_id); +-- status should be compressed --- +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_14_62_chunk + 1 | _hyper_14_64_chunk +(2 rows) + +\set ON_ERROR_STOP 0 +-- call recompress_chunk when status is not unordered +CALL recompress_chunk(:'CHUNK_NAME'::regclass, true); +psql:include/recompress_basic.sql:110: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk" +-- This will succeed and compress the chunk for the test below. 
+CALL recompress_chunk(:'CHUNK_NAME'::regclass, false); +psql:include/recompress_basic.sql:113: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk" +--now decompress it , then try and recompress +SELECT decompress_chunk(:'CHUNK_NAME'::regclass); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_14_62_chunk +(1 row) + +CALL recompress_chunk(:'CHUNK_NAME'::regclass); +psql:include/recompress_basic.sql:117: ERROR: call compress_chunk instead of recompress_chunk +\set ON_ERROR_STOP 1 +-- test recompress policy +CREATE TABLE metrics(time timestamptz NOT NULL); +SELECT hypertable_id AS "HYPERTABLE_ID", schema_name, table_name, created FROM create_hypertable('metrics','time') \gset +ALTER TABLE metrics SET (timescaledb.compress); +-- create chunk with some data and compress +INSERT INTO metrics SELECT '2000-01-01' FROM generate_series(1,10); +-- create custom compression job without recompress boolean +SELECT add_job('_timescaledb_functions.policy_compression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "compress_after": "@ 7 days"}')::jsonb, initial_start => '2000-01-01 00:00:00+00'::timestamptz) AS "JOB_COMPRESS" \gset +-- first call should compress +CALL run_job(:JOB_COMPRESS); +-- 2nd call should do nothing +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- do an INSERT so recompress has something to do +INSERT INTO metrics SELECT '2000-01-01'; +---- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +-- should recompress +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- disable recompress in compress job +SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','false'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS; + alter_job +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": false, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",) +(1 row) + +-- nothing to do +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- do an INSERT so recompress has something to do +INSERT INTO metrics SELECT '2000-01-01'; +---- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +-- still nothing to do since we disabled recompress +CALL run_job(:JOB_COMPRESS); +---- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +-- reenable recompress in compress job +SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','true'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS; + alter_job 
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": true, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",) +(1 row) + +-- should recompress now +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +SELECT delete_job(:JOB_COMPRESS); + delete_job +------------ + +(1 row) + +SELECT add_job('_timescaledb_functions.policy_recompression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "recompress_after": "@ 7 days", "maxchunks_to_compress": 1}')::jsonb) AS "JOB_RECOMPRESS" \gset +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +---- nothing to do yet +CALL run_job(:JOB_RECOMPRESS); +psql:include/recompress_basic.sql:189: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- create some work for recompress +INSERT INTO metrics SELECT '2000-01-01'; +-- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +CALL run_job(:JOB_RECOMPRESS); +-- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +SELECT delete_job(:JOB_RECOMPRESS); + delete_job +------------ + +(1 row) + +-- Teardown test +\c :TEST_DBNAME :ROLE_SUPERUSER +REVOKE CREATE ON SCHEMA public FROM NOLOGIN_ROLE; +DROP ROLE NOLOGIN_ROLE; diff --git a/tsl/test/expected/compression_bgw-15.out b/tsl/test/expected/compression_bgw-15.out new file mode 100644 index 00000000000..6470ec0e451 --- /dev/null +++ b/tsl/test/expected/compression_bgw-15.out @@ -0,0 +1,657 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE ROLE NOLOGIN_ROLE WITH nologin noinherit; +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO NOLOGIN_ROLE; +GRANT NOLOGIN_ROLE TO :ROLE_DEFAULT_PERM_USER WITH ADMIN OPTION; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + location2 char(10) NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select create_hypertable( 'conditions', 'time', chunk_time_interval=> '31days'::interval); + create_hypertable +------------------------- + (1,public,conditions,t) +(1 row) + +--TEST 1-- +--cannot set policy without enabling compression -- +\set ON_ERROR_STOP 0 +select add_compression_policy('conditions', '60d'::interval); +ERROR: compression not enabled on hypertable "conditions" +\set ON_ERROR_STOP 1 +-- TEST2 -- +--add a policy to compress chunks -- +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'time'); +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +select add_compression_policy('conditions', '60d'::interval) AS compressjob_id +\gset +select * from _timescaledb_config.bgw_job where id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------+------------------------+--------------------------+---------- + 1000 | Compression Policy [1000] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days"} | _timescaledb_functions | policy_compression_check | +(1 row) + +select * from alter_job(:compressjob_id, schedule_interval=>'1s'); + job_id | schedule_interval | max_runtime | max_retries | retry_period | scheduled | config | next_start | check_config | fixed_schedule | initial_start | timezone +--------+-------------------+-------------+-------------+--------------+-----------+-----------------------------------------------------+------------+-------------------------------------------------+----------------+---------------+---------- + 1000 | @ 1 sec | @ 0 | -1 | @ 1 hour | t | {"hypertable_id": 1, "compress_after": "@ 60 days"} | -infinity | _timescaledb_functions.policy_compression_check | f | | +(1 row) + +--enable maxchunks to 1 so that only 1 chunk is compressed by the job +SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1')) + FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + (1000,"@ 1 sec","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 1, ""compress_after"": ""@ 60 days"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + 
+select * from _timescaledb_config.bgw_job where id >= 1000 ORDER BY id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+--------------------------+---------- + 1000 | Compression Policy [1000] | @ 1 sec | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days", "maxchunks_to_compress": 1} | _timescaledb_functions | policy_compression_check | +(1 row) + +insert into conditions +select now()::timestamp, 'TOK', 'sony', 55, 75; +-- TEST3 -- +--only the old chunks will get compressed when policy is executed-- +CALL run_job(:compressjob_id); +select chunk_name, pg_size_pretty(before_compression_total_bytes) before_total, +pg_size_pretty( after_compression_total_bytes) after_total +from chunk_compression_stats('conditions') where compression_status like 'Compressed' order by chunk_name; + chunk_name | before_total | after_total +------------------+--------------+------------- + _hyper_1_1_chunk | 32 kB | 40 kB +(1 row) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk ORDER BY id; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+--------------------------+---------------------+---------+--------+----------- + 1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | 4 | f | 1 | f + 2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | | f | 0 | f + 3 | 1 | _timescaledb_internal | _hyper_1_3_chunk | | f | 0 | f + 4 | 2 | _timescaledb_internal | compress_hyper_2_4_chunk | | f | 0 | f +(4 rows) + +-- TEST 4 -- +--cannot set another policy +\set ON_ERROR_STOP 0 +select add_compression_policy('conditions', '60d'::interval, if_not_exists=>true); +NOTICE: compression policy already exists for hypertable "conditions", skipping + add_compression_policy +------------------------ + -1 +(1 row) + +select add_compression_policy('conditions', '60d'::interval); +ERROR: compression policy already exists for hypertable or continuous aggregate "conditions" +select add_compression_policy('conditions', '30d'::interval, if_not_exists=>true); +WARNING: compression policy already exists for hypertable "conditions" + add_compression_policy +------------------------ + -1 +(1 row) + +\set ON_ERROR_STOP 1 +--TEST 5 -- +-- drop the policy -- +select remove_compression_policy('conditions'); + remove_compression_policy +--------------------------- + t +(1 row) + +select count(*) from _timescaledb_config.bgw_job WHERE id>=1000; + count +------- + 0 +(1 row) + +--TEST 6 -- +-- try to execute the policy after it has been dropped -- +\set ON_ERROR_STOP 0 +CALL run_job(:compressjob_id); +ERROR: job 1000 not found +--errors with bad input for add/remove compression policy +create view dummyv1 as select * from conditions limit 1; +select add_compression_policy( 100 , compress_after=> '1 day'::interval); +ERROR: object with id "100" not found +select 
add_compression_policy( 'dummyv1', compress_after=> '1 day'::interval ); +ERROR: "dummyv1" is not a hypertable or a continuous aggregate +select remove_compression_policy( 100 ); +ERROR: relation is not a hypertable or continuous aggregate +\set ON_ERROR_STOP 1 +-- We're done with the table, so drop it. +DROP TABLE IF EXISTS conditions CASCADE; +NOTICE: drop cascades to table _timescaledb_internal.compress_hyper_2_4_chunk +NOTICE: drop cascades to view dummyv1 +--TEST 7 +--compression policy for smallint, integer or bigint based partition hypertable +--smallint test +CREATE TABLE test_table_smallint(time SMALLINT, val SMALLINT); +SELECT create_hypertable('test_table_smallint', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------- + (3,public,test_table_smallint,t) +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now_smallint() RETURNS SMALLINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::SMALLINT'; +SELECT set_integer_now_func('test_table_smallint', 'dummy_now_smallint'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_smallint SELECT generate_series(1,5), 10; +ALTER TABLE test_table_smallint SET (timescaledb.compress); +\set ON_ERROR_STOP 0 +select add_compression_policy( 'test_table_smallint', compress_after=> '1 day'::interval ); +ERROR: unsupported compress_after argument type, expected type : smallint +\set ON_ERROR_STOP 1 +SELECT add_compression_policy('test_table_smallint', 2::SMALLINT) AS compressjob_id \gset +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+---------- + 1001 | Compression Policy [1001] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 2} | _timescaledb_functions | policy_compression_check | +(1 row) + +--will compress all chunks that need compression +CALL run_job(:compressjob_id); +SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes +FROM chunk_compression_stats('test_table_smallint') +WHERE compression_status LIKE 'Compressed' +ORDER BY chunk_name; + chunk_name | before_compression_total_bytes | after_compression_total_bytes +------------------+--------------------------------+------------------------------- + _hyper_3_5_chunk | 24576 | 24576 + _hyper_3_6_chunk | 24576 | 24576 +(2 rows) + +--integer tests +CREATE TABLE test_table_integer(time INTEGER, val INTEGER); +SELECT create_hypertable('test_table_integer', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------------- + (5,public,test_table_integer,t) +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now_integer() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS 'SELECT 5::INTEGER'; +SELECT set_integer_now_func('test_table_integer', 'dummy_now_integer'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO 
test_table_integer SELECT generate_series(1,5), 10; +ALTER TABLE test_table_integer SET (timescaledb.compress); +SELECT add_compression_policy('test_table_integer', 2::INTEGER) AS compressjob_id \gset +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+---------- + 1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 5 | {"hypertable_id": 5, "compress_after": 2} | _timescaledb_functions | policy_compression_check | +(1 row) + +--will compress all chunks that need compression +CALL run_job(:compressjob_id); +SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes +FROM chunk_compression_stats('test_table_integer') +WHERE compression_status LIKE 'Compressed' +ORDER BY chunk_name; + chunk_name | before_compression_total_bytes | after_compression_total_bytes +-------------------+--------------------------------+------------------------------- + _hyper_5_12_chunk | 24576 | 24576 + _hyper_5_13_chunk | 24576 | 24576 +(2 rows) + +--bigint test +CREATE TABLE test_table_bigint(time BIGINT, val BIGINT); +SELECT create_hypertable('test_table_bigint', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------------------- + (7,public,test_table_bigint,t) +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now_bigint() RETURNS BIGINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::BIGINT'; +SELECT set_integer_now_func('test_table_bigint', 'dummy_now_bigint'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_bigint SELECT generate_series(1,5), 10; +ALTER TABLE test_table_bigint SET (timescaledb.compress); +SELECT add_compression_policy('test_table_bigint', 2::BIGINT) AS compressjob_id \gset +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+---------- + 1003 | Compression Policy [1003] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 7 | {"hypertable_id": 7, "compress_after": 2} | _timescaledb_functions | policy_compression_check | +(1 row) + +--will compress all chunks that need compression +CALL run_job(:compressjob_id); +SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes +FROM chunk_compression_stats('test_table_bigint') +WHERE compression_status LIKE 'Compressed' +ORDER BY chunk_name; + 
chunk_name | before_compression_total_bytes | after_compression_total_bytes +-------------------+--------------------------------+------------------------------- + _hyper_7_19_chunk | 24576 | 24576 + _hyper_7_20_chunk | 24576 | 24576 +(2 rows) + +--TEST 8 +--hypertable owner lacks permission to start background worker +SET ROLE NOLOGIN_ROLE; +CREATE TABLE test_table_nologin(time bigint, val int); +SELECT create_hypertable('test_table_nologin', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------------- + (9,public,test_table_nologin,t) +(1 row) + +SELECT set_integer_now_func('test_table_nologin', 'dummy_now_bigint'); + set_integer_now_func +---------------------- + +(1 row) + +ALTER TABLE test_table_nologin set (timescaledb.compress); +\set ON_ERROR_STOP 0 +SELECT add_compression_policy('test_table_nologin', 2::int); +ERROR: permission denied to start background process as role "nologin_role" +\set ON_ERROR_STOP 1 +DROP TABLE test_table_nologin; +RESET ROLE; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE conditions( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temperature FLOAT +); +SELECT * FROM create_hypertable('conditions', 'time', + chunk_time_interval => '1 day'::interval); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 11 | public | conditions | t +(1 row) + +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '10 min') AS time; +CREATE MATERIALIZED VIEW conditions_summary +WITH (timescaledb.continuous) AS +SELECT device, + time_bucket(INTERVAL '1 hour', "time") AS day, + AVG(temperature) AS avg_temperature, + MAX(temperature) AS max_temperature, + MIN(temperature) AS min_temperature +FROM conditions +GROUP BY device, time_bucket(INTERVAL '1 hour', "time") WITH NO DATA; +CALL refresh_continuous_aggregate('conditions_summary', NULL, NULL); +ALTER TABLE conditions SET (timescaledb.compress); +SELECT COUNT(*) AS dropped_chunks_count + FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00'); + dropped_chunks_count +---------------------- + 14 +(1 row) + +-- We need to have some chunks that are marked as dropped, otherwise +-- we will not have a problem below. 
+SELECT COUNT(*) AS dropped_chunks_count + FROM _timescaledb_catalog.chunk + WHERE dropped = TRUE; + dropped_chunks_count +---------------------- + 14 +(1 row) + +SELECT count(*) FROM timescaledb_information.chunks +WHERE hypertable_name = 'conditions' and is_compressed = true; + count +------- + 0 +(1 row) + +SELECT add_compression_policy AS job_id + FROM add_compression_policy('conditions', INTERVAL '1 day') \gset +-- job compresses only 1 chunk at a time -- +SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1')) + FROM _timescaledb_config.bgw_job WHERE id = :job_id; + alter_job +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +SELECT alter_job(id,config:=jsonb_set(config,'{verbose_log}', 'true')) + FROM _timescaledb_config.bgw_job WHERE id = :job_id; + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""verbose_log"": true, ""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +set client_min_messages TO LOG; +CALL run_job(:job_id); +LOG: statement: CALL run_job(1004); +LOG: job 1004 completed processing chunk _timescaledb_internal._hyper_11_40_chunk +set client_min_messages TO NOTICE; +LOG: statement: set client_min_messages TO NOTICE; +SELECT count(*) FROM timescaledb_information.chunks +WHERE hypertable_name = 'conditions' and is_compressed = true; + count +------- + 1 +(1 row) + +\i include/recompress_basic.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE OR REPLACE VIEW compressed_chunk_info_view AS +SELECT + h.schema_name AS hypertable_schema, + h.table_name AS hypertable_name, + c.schema_name as chunk_schema, + c.table_name as chunk_name, + c.status as chunk_status, + comp.schema_name as compressed_chunk_schema, + comp.table_name as compressed_chunk_name +FROM + _timescaledb_catalog.hypertable h JOIN + _timescaledb_catalog.chunk c ON h.id = c.hypertable_id + LEFT JOIN _timescaledb_catalog.chunk comp +ON comp.id = c.compressed_chunk_id +; +CREATE TABLE test2 (timec timestamptz NOT NULL, i integer , + b bigint, t text); +SELECT table_name from create_hypertable('test2', 'timec', chunk_time_interval=> INTERVAL '7 days'); + table_name +------------ + test2 +(1 row) + +INSERT INTO test2 SELECT q, 10, 11, 'hello' FROM generate_series( '2020-01-03 10:00:00+00', '2020-01-03 12:00:00+00' , '5 min'::interval) q; +ALTER TABLE test2 set (timescaledb.compress, +timescaledb.compress_segmentby = 'b', +timescaledb.compress_orderby = 'timec DESC'); +SELECT compress_chunk(c) +FROM show_chunks('test2') c; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_14_62_chunk +(1 row) + +---insert into the middle of the range --- +INSERT INTO test2 values ( '2020-01-03 10:01:00+00', 20, 11, '2row'); +INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 11, '3row'); +INSERT INTO test2 values ( '2020-01-03 12:01:00+00', 20, 11, '4row'); +--- insert a new segment by --- +INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 12, '12row'); +SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*) +FROM test2 +GROUP BY time_bucket(INTERVAL '2 hour', timec), b +ORDER BY 1, 2; + time_bucket | b | count +------------------------------+----+------- + Fri Jan 03 02:00:00 2020 PST | 11 | 26 + Fri Jan 03 02:00:00 2020 PST | 12 | 1 + Fri Jan 03 04:00:00 2020 PST | 11 | 2 +(3 rows) + +--check status for chunk -- +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 9 | _hyper_14_62_chunk +(1 row) + +SELECT compressed_chunk_schema || '.' || compressed_chunk_name as "COMP_CHUNK_NAME", + chunk_schema || '.' || chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' \gset +SELECT count(*) from test2; + count +------- + 29 +(1 row) + +-- call recompress_chunk inside a transaction. This should fails since +-- it contains transaction-terminating commands. +\set ON_ERROR_STOP 0 +START TRANSACTION; +CALL recompress_chunk(:'CHUNK_NAME'::regclass); +ROLLBACK; +\set ON_ERROR_STOP 1 +CALL recompress_chunk(:'CHUNK_NAME'::regclass); +-- Demonstrate that no locks are held on the hypertable, chunk, or the +-- compressed chunk after recompress_chunk has executed. 
+SELECT pid, locktype, relation, relation::regclass, mode, granted +FROM pg_locks +WHERE relation::regclass::text IN (:'CHUNK_NAME', :'COMP_CHUNK_NAME', 'test2') +ORDER BY pid; + pid | locktype | relation | relation | mode | granted +-----+----------+----------+----------+------+--------- +(0 rows) + +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_14_62_chunk +(1 row) + +--- insert into a compressed chunk again + a new chunk-- +INSERT INTO test2 values ( '2020-01-03 11:01:03+00', 20, 11, '33row'), + ( '2020-01-03 11:01:06+00', 20, 11, '36row'), + ( '2020-01-03 11:02:00+00', 20, 12, '12row'), + ( '2020-04-03 00:02:00+00', 30, 13, '3013row'); +SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*) +FROM test2 +GROUP BY time_bucket(INTERVAL '2 hour', timec), b +ORDER BY 1, 2; + time_bucket | b | count +------------------------------+----+------- + Fri Jan 03 02:00:00 2020 PST | 11 | 28 + Fri Jan 03 02:00:00 2020 PST | 12 | 2 + Fri Jan 03 04:00:00 2020 PST | 11 | 2 + Thu Apr 02 17:00:00 2020 PDT | 13 | 1 +(4 rows) + +--chunk status should be unordered for the previously compressed chunk +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 9 | _hyper_14_62_chunk + 0 | _hyper_14_64_chunk +(2 rows) + +SELECT add_compression_policy AS job_id + FROM add_compression_policy('test2', '30d'::interval) \gset +CALL run_job(:job_id); +CALL run_job(:job_id); +-- status should be compressed --- +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_14_62_chunk + 1 | _hyper_14_64_chunk +(2 rows) + +\set ON_ERROR_STOP 0 +-- call recompress_chunk when status is not unordered +CALL recompress_chunk(:'CHUNK_NAME'::regclass, true); +psql:include/recompress_basic.sql:110: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk" +-- This will succeed and compress the chunk for the test below. 
+CALL recompress_chunk(:'CHUNK_NAME'::regclass, false); +psql:include/recompress_basic.sql:113: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk" +--now decompress it , then try and recompress +SELECT decompress_chunk(:'CHUNK_NAME'::regclass); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_14_62_chunk +(1 row) + +CALL recompress_chunk(:'CHUNK_NAME'::regclass); +psql:include/recompress_basic.sql:117: ERROR: call compress_chunk instead of recompress_chunk +\set ON_ERROR_STOP 1 +-- test recompress policy +CREATE TABLE metrics(time timestamptz NOT NULL); +SELECT hypertable_id AS "HYPERTABLE_ID", schema_name, table_name, created FROM create_hypertable('metrics','time') \gset +ALTER TABLE metrics SET (timescaledb.compress); +-- create chunk with some data and compress +INSERT INTO metrics SELECT '2000-01-01' FROM generate_series(1,10); +-- create custom compression job without recompress boolean +SELECT add_job('_timescaledb_functions.policy_compression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "compress_after": "@ 7 days"}')::jsonb, initial_start => '2000-01-01 00:00:00+00'::timestamptz) AS "JOB_COMPRESS" \gset +-- first call should compress +CALL run_job(:JOB_COMPRESS); +-- 2nd call should do nothing +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- do an INSERT so recompress has something to do +INSERT INTO metrics SELECT '2000-01-01'; +---- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +-- should recompress +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- disable recompress in compress job +SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','false'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS; + alter_job +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": false, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",) +(1 row) + +-- nothing to do +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- do an INSERT so recompress has something to do +INSERT INTO metrics SELECT '2000-01-01'; +---- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +-- still nothing to do since we disabled recompress +CALL run_job(:JOB_COMPRESS); +---- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +-- reenable recompress in compress job +SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','true'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS; + alter_job 
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": true, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",) +(1 row) + +-- should recompress now +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +SELECT delete_job(:JOB_COMPRESS); + delete_job +------------ + +(1 row) + +SELECT add_job('_timescaledb_functions.policy_recompression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "recompress_after": "@ 7 days", "maxchunks_to_compress": 1}')::jsonb) AS "JOB_RECOMPRESS" \gset +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +---- nothing to do yet +CALL run_job(:JOB_RECOMPRESS); +psql:include/recompress_basic.sql:189: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- create some work for recompress +INSERT INTO metrics SELECT '2000-01-01'; +-- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +CALL run_job(:JOB_RECOMPRESS); +-- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +SELECT delete_job(:JOB_RECOMPRESS); + delete_job +------------ + +(1 row) + +-- Teardown test +\c :TEST_DBNAME :ROLE_SUPERUSER +REVOKE CREATE ON SCHEMA public FROM NOLOGIN_ROLE; +DROP ROLE NOLOGIN_ROLE; diff --git a/tsl/test/expected/compression_bgw-16.out b/tsl/test/expected/compression_bgw-16.out new file mode 100644 index 00000000000..6470ec0e451 --- /dev/null +++ b/tsl/test/expected/compression_bgw-16.out @@ -0,0 +1,657 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE ROLE NOLOGIN_ROLE WITH nologin noinherit; +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO NOLOGIN_ROLE; +GRANT NOLOGIN_ROLE TO :ROLE_DEFAULT_PERM_USER WITH ADMIN OPTION; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + location2 char(10) NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select create_hypertable( 'conditions', 'time', chunk_time_interval=> '31days'::interval); + create_hypertable +------------------------- + (1,public,conditions,t) +(1 row) + +--TEST 1-- +--cannot set policy without enabling compression -- +\set ON_ERROR_STOP 0 +select add_compression_policy('conditions', '60d'::interval); +ERROR: compression not enabled on hypertable "conditions" +\set ON_ERROR_STOP 1 +-- TEST2 -- +--add a policy to compress chunks -- +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'time'); +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +select add_compression_policy('conditions', '60d'::interval) AS compressjob_id +\gset +select * from _timescaledb_config.bgw_job where id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------+------------------------+--------------------------+---------- + 1000 | Compression Policy [1000] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days"} | _timescaledb_functions | policy_compression_check | +(1 row) + +select * from alter_job(:compressjob_id, schedule_interval=>'1s'); + job_id | schedule_interval | max_runtime | max_retries | retry_period | scheduled | config | next_start | check_config | fixed_schedule | initial_start | timezone +--------+-------------------+-------------+-------------+--------------+-----------+-----------------------------------------------------+------------+-------------------------------------------------+----------------+---------------+---------- + 1000 | @ 1 sec | @ 0 | -1 | @ 1 hour | t | {"hypertable_id": 1, "compress_after": "@ 60 days"} | -infinity | _timescaledb_functions.policy_compression_check | f | | +(1 row) + +--enable maxchunks to 1 so that only 1 chunk is compressed by the job +SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1')) + FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + (1000,"@ 1 sec","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 1, ""compress_after"": ""@ 60 days"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + 
+select * from _timescaledb_config.bgw_job where id >= 1000 ORDER BY id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+--------------------------+---------- + 1000 | Compression Policy [1000] | @ 1 sec | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days", "maxchunks_to_compress": 1} | _timescaledb_functions | policy_compression_check | +(1 row) + +insert into conditions +select now()::timestamp, 'TOK', 'sony', 55, 75; +-- TEST3 -- +--only the old chunks will get compressed when policy is executed-- +CALL run_job(:compressjob_id); +select chunk_name, pg_size_pretty(before_compression_total_bytes) before_total, +pg_size_pretty( after_compression_total_bytes) after_total +from chunk_compression_stats('conditions') where compression_status like 'Compressed' order by chunk_name; + chunk_name | before_total | after_total +------------------+--------------+------------- + _hyper_1_1_chunk | 32 kB | 40 kB +(1 row) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk ORDER BY id; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+--------------------------+---------------------+---------+--------+----------- + 1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | 4 | f | 1 | f + 2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | | f | 0 | f + 3 | 1 | _timescaledb_internal | _hyper_1_3_chunk | | f | 0 | f + 4 | 2 | _timescaledb_internal | compress_hyper_2_4_chunk | | f | 0 | f +(4 rows) + +-- TEST 4 -- +--cannot set another policy +\set ON_ERROR_STOP 0 +select add_compression_policy('conditions', '60d'::interval, if_not_exists=>true); +NOTICE: compression policy already exists for hypertable "conditions", skipping + add_compression_policy +------------------------ + -1 +(1 row) + +select add_compression_policy('conditions', '60d'::interval); +ERROR: compression policy already exists for hypertable or continuous aggregate "conditions" +select add_compression_policy('conditions', '30d'::interval, if_not_exists=>true); +WARNING: compression policy already exists for hypertable "conditions" + add_compression_policy +------------------------ + -1 +(1 row) + +\set ON_ERROR_STOP 1 +--TEST 5 -- +-- drop the policy -- +select remove_compression_policy('conditions'); + remove_compression_policy +--------------------------- + t +(1 row) + +select count(*) from _timescaledb_config.bgw_job WHERE id>=1000; + count +------- + 0 +(1 row) + +--TEST 6 -- +-- try to execute the policy after it has been dropped -- +\set ON_ERROR_STOP 0 +CALL run_job(:compressjob_id); +ERROR: job 1000 not found +--errors with bad input for add/remove compression policy +create view dummyv1 as select * from conditions limit 1; +select add_compression_policy( 100 , compress_after=> '1 day'::interval); +ERROR: object with id "100" not found +select 
add_compression_policy( 'dummyv1', compress_after=> '1 day'::interval ); +ERROR: "dummyv1" is not a hypertable or a continuous aggregate +select remove_compression_policy( 100 ); +ERROR: relation is not a hypertable or continuous aggregate +\set ON_ERROR_STOP 1 +-- We're done with the table, so drop it. +DROP TABLE IF EXISTS conditions CASCADE; +NOTICE: drop cascades to table _timescaledb_internal.compress_hyper_2_4_chunk +NOTICE: drop cascades to view dummyv1 +--TEST 7 +--compression policy for smallint, integer or bigint based partition hypertable +--smallint test +CREATE TABLE test_table_smallint(time SMALLINT, val SMALLINT); +SELECT create_hypertable('test_table_smallint', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------- + (3,public,test_table_smallint,t) +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now_smallint() RETURNS SMALLINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::SMALLINT'; +SELECT set_integer_now_func('test_table_smallint', 'dummy_now_smallint'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_smallint SELECT generate_series(1,5), 10; +ALTER TABLE test_table_smallint SET (timescaledb.compress); +\set ON_ERROR_STOP 0 +select add_compression_policy( 'test_table_smallint', compress_after=> '1 day'::interval ); +ERROR: unsupported compress_after argument type, expected type : smallint +\set ON_ERROR_STOP 1 +SELECT add_compression_policy('test_table_smallint', 2::SMALLINT) AS compressjob_id \gset +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+---------- + 1001 | Compression Policy [1001] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 2} | _timescaledb_functions | policy_compression_check | +(1 row) + +--will compress all chunks that need compression +CALL run_job(:compressjob_id); +SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes +FROM chunk_compression_stats('test_table_smallint') +WHERE compression_status LIKE 'Compressed' +ORDER BY chunk_name; + chunk_name | before_compression_total_bytes | after_compression_total_bytes +------------------+--------------------------------+------------------------------- + _hyper_3_5_chunk | 24576 | 24576 + _hyper_3_6_chunk | 24576 | 24576 +(2 rows) + +--integer tests +CREATE TABLE test_table_integer(time INTEGER, val INTEGER); +SELECT create_hypertable('test_table_integer', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------------- + (5,public,test_table_integer,t) +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now_integer() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS 'SELECT 5::INTEGER'; +SELECT set_integer_now_func('test_table_integer', 'dummy_now_integer'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO 
test_table_integer SELECT generate_series(1,5), 10; +ALTER TABLE test_table_integer SET (timescaledb.compress); +SELECT add_compression_policy('test_table_integer', 2::INTEGER) AS compressjob_id \gset +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+---------- + 1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 5 | {"hypertable_id": 5, "compress_after": 2} | _timescaledb_functions | policy_compression_check | +(1 row) + +--will compress all chunks that need compression +CALL run_job(:compressjob_id); +SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes +FROM chunk_compression_stats('test_table_integer') +WHERE compression_status LIKE 'Compressed' +ORDER BY chunk_name; + chunk_name | before_compression_total_bytes | after_compression_total_bytes +-------------------+--------------------------------+------------------------------- + _hyper_5_12_chunk | 24576 | 24576 + _hyper_5_13_chunk | 24576 | 24576 +(2 rows) + +--bigint test +CREATE TABLE test_table_bigint(time BIGINT, val BIGINT); +SELECT create_hypertable('test_table_bigint', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------------------- + (7,public,test_table_bigint,t) +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now_bigint() RETURNS BIGINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::BIGINT'; +SELECT set_integer_now_func('test_table_bigint', 'dummy_now_bigint'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_bigint SELECT generate_series(1,5), 10; +ALTER TABLE test_table_bigint SET (timescaledb.compress); +SELECT add_compression_policy('test_table_bigint', 2::BIGINT) AS compressjob_id \gset +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+---------- + 1003 | Compression Policy [1003] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 7 | {"hypertable_id": 7, "compress_after": 2} | _timescaledb_functions | policy_compression_check | +(1 row) + +--will compress all chunks that need compression +CALL run_job(:compressjob_id); +SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes +FROM chunk_compression_stats('test_table_bigint') +WHERE compression_status LIKE 'Compressed' +ORDER BY chunk_name; + 
chunk_name | before_compression_total_bytes | after_compression_total_bytes +-------------------+--------------------------------+------------------------------- + _hyper_7_19_chunk | 24576 | 24576 + _hyper_7_20_chunk | 24576 | 24576 +(2 rows) + +--TEST 8 +--hypertable owner lacks permission to start background worker +SET ROLE NOLOGIN_ROLE; +CREATE TABLE test_table_nologin(time bigint, val int); +SELECT create_hypertable('test_table_nologin', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------------- + (9,public,test_table_nologin,t) +(1 row) + +SELECT set_integer_now_func('test_table_nologin', 'dummy_now_bigint'); + set_integer_now_func +---------------------- + +(1 row) + +ALTER TABLE test_table_nologin set (timescaledb.compress); +\set ON_ERROR_STOP 0 +SELECT add_compression_policy('test_table_nologin', 2::int); +ERROR: permission denied to start background process as role "nologin_role" +\set ON_ERROR_STOP 1 +DROP TABLE test_table_nologin; +RESET ROLE; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE conditions( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temperature FLOAT +); +SELECT * FROM create_hypertable('conditions', 'time', + chunk_time_interval => '1 day'::interval); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 11 | public | conditions | t +(1 row) + +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '10 min') AS time; +CREATE MATERIALIZED VIEW conditions_summary +WITH (timescaledb.continuous) AS +SELECT device, + time_bucket(INTERVAL '1 hour', "time") AS day, + AVG(temperature) AS avg_temperature, + MAX(temperature) AS max_temperature, + MIN(temperature) AS min_temperature +FROM conditions +GROUP BY device, time_bucket(INTERVAL '1 hour', "time") WITH NO DATA; +CALL refresh_continuous_aggregate('conditions_summary', NULL, NULL); +ALTER TABLE conditions SET (timescaledb.compress); +SELECT COUNT(*) AS dropped_chunks_count + FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00'); + dropped_chunks_count +---------------------- + 14 +(1 row) + +-- We need to have some chunks that are marked as dropped, otherwise +-- we will not have a problem below. 
+SELECT COUNT(*) AS dropped_chunks_count + FROM _timescaledb_catalog.chunk + WHERE dropped = TRUE; + dropped_chunks_count +---------------------- + 14 +(1 row) + +SELECT count(*) FROM timescaledb_information.chunks +WHERE hypertable_name = 'conditions' and is_compressed = true; + count +------- + 0 +(1 row) + +SELECT add_compression_policy AS job_id + FROM add_compression_policy('conditions', INTERVAL '1 day') \gset +-- job compresses only 1 chunk at a time -- +SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1')) + FROM _timescaledb_config.bgw_job WHERE id = :job_id; + alter_job +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +SELECT alter_job(id,config:=jsonb_set(config,'{verbose_log}', 'true')) + FROM _timescaledb_config.bgw_job WHERE id = :job_id; + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""verbose_log"": true, ""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +set client_min_messages TO LOG; +CALL run_job(:job_id); +LOG: statement: CALL run_job(1004); +LOG: job 1004 completed processing chunk _timescaledb_internal._hyper_11_40_chunk +set client_min_messages TO NOTICE; +LOG: statement: set client_min_messages TO NOTICE; +SELECT count(*) FROM timescaledb_information.chunks +WHERE hypertable_name = 'conditions' and is_compressed = true; + count +------- + 1 +(1 row) + +\i include/recompress_basic.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE OR REPLACE VIEW compressed_chunk_info_view AS +SELECT + h.schema_name AS hypertable_schema, + h.table_name AS hypertable_name, + c.schema_name as chunk_schema, + c.table_name as chunk_name, + c.status as chunk_status, + comp.schema_name as compressed_chunk_schema, + comp.table_name as compressed_chunk_name +FROM + _timescaledb_catalog.hypertable h JOIN + _timescaledb_catalog.chunk c ON h.id = c.hypertable_id + LEFT JOIN _timescaledb_catalog.chunk comp +ON comp.id = c.compressed_chunk_id +; +CREATE TABLE test2 (timec timestamptz NOT NULL, i integer , + b bigint, t text); +SELECT table_name from create_hypertable('test2', 'timec', chunk_time_interval=> INTERVAL '7 days'); + table_name +------------ + test2 +(1 row) + +INSERT INTO test2 SELECT q, 10, 11, 'hello' FROM generate_series( '2020-01-03 10:00:00+00', '2020-01-03 12:00:00+00' , '5 min'::interval) q; +ALTER TABLE test2 set (timescaledb.compress, +timescaledb.compress_segmentby = 'b', +timescaledb.compress_orderby = 'timec DESC'); +SELECT compress_chunk(c) +FROM show_chunks('test2') c; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_14_62_chunk +(1 row) + +---insert into the middle of the range --- +INSERT INTO test2 values ( '2020-01-03 10:01:00+00', 20, 11, '2row'); +INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 11, '3row'); +INSERT INTO test2 values ( '2020-01-03 12:01:00+00', 20, 11, '4row'); +--- insert a new segment by --- +INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 12, '12row'); +SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*) +FROM test2 +GROUP BY time_bucket(INTERVAL '2 hour', timec), b +ORDER BY 1, 2; + time_bucket | b | count +------------------------------+----+------- + Fri Jan 03 02:00:00 2020 PST | 11 | 26 + Fri Jan 03 02:00:00 2020 PST | 12 | 1 + Fri Jan 03 04:00:00 2020 PST | 11 | 2 +(3 rows) + +--check status for chunk -- +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 9 | _hyper_14_62_chunk +(1 row) + +SELECT compressed_chunk_schema || '.' || compressed_chunk_name as "COMP_CHUNK_NAME", + chunk_schema || '.' || chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' \gset +SELECT count(*) from test2; + count +------- + 29 +(1 row) + +-- call recompress_chunk inside a transaction. This should fails since +-- it contains transaction-terminating commands. +\set ON_ERROR_STOP 0 +START TRANSACTION; +CALL recompress_chunk(:'CHUNK_NAME'::regclass); +ROLLBACK; +\set ON_ERROR_STOP 1 +CALL recompress_chunk(:'CHUNK_NAME'::regclass); +-- Demonstrate that no locks are held on the hypertable, chunk, or the +-- compressed chunk after recompress_chunk has executed. 
+SELECT pid, locktype, relation, relation::regclass, mode, granted +FROM pg_locks +WHERE relation::regclass::text IN (:'CHUNK_NAME', :'COMP_CHUNK_NAME', 'test2') +ORDER BY pid; + pid | locktype | relation | relation | mode | granted +-----+----------+----------+----------+------+--------- +(0 rows) + +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_14_62_chunk +(1 row) + +--- insert into a compressed chunk again + a new chunk-- +INSERT INTO test2 values ( '2020-01-03 11:01:03+00', 20, 11, '33row'), + ( '2020-01-03 11:01:06+00', 20, 11, '36row'), + ( '2020-01-03 11:02:00+00', 20, 12, '12row'), + ( '2020-04-03 00:02:00+00', 30, 13, '3013row'); +SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*) +FROM test2 +GROUP BY time_bucket(INTERVAL '2 hour', timec), b +ORDER BY 1, 2; + time_bucket | b | count +------------------------------+----+------- + Fri Jan 03 02:00:00 2020 PST | 11 | 28 + Fri Jan 03 02:00:00 2020 PST | 12 | 2 + Fri Jan 03 04:00:00 2020 PST | 11 | 2 + Thu Apr 02 17:00:00 2020 PDT | 13 | 1 +(4 rows) + +--chunk status should be unordered for the previously compressed chunk +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 9 | _hyper_14_62_chunk + 0 | _hyper_14_64_chunk +(2 rows) + +SELECT add_compression_policy AS job_id + FROM add_compression_policy('test2', '30d'::interval) \gset +CALL run_job(:job_id); +CALL run_job(:job_id); +-- status should be compressed --- +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_14_62_chunk + 1 | _hyper_14_64_chunk +(2 rows) + +\set ON_ERROR_STOP 0 +-- call recompress_chunk when status is not unordered +CALL recompress_chunk(:'CHUNK_NAME'::regclass, true); +psql:include/recompress_basic.sql:110: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk" +-- This will succeed and compress the chunk for the test below. 
+CALL recompress_chunk(:'CHUNK_NAME'::regclass, false); +psql:include/recompress_basic.sql:113: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk" +--now decompress it , then try and recompress +SELECT decompress_chunk(:'CHUNK_NAME'::regclass); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_14_62_chunk +(1 row) + +CALL recompress_chunk(:'CHUNK_NAME'::regclass); +psql:include/recompress_basic.sql:117: ERROR: call compress_chunk instead of recompress_chunk +\set ON_ERROR_STOP 1 +-- test recompress policy +CREATE TABLE metrics(time timestamptz NOT NULL); +SELECT hypertable_id AS "HYPERTABLE_ID", schema_name, table_name, created FROM create_hypertable('metrics','time') \gset +ALTER TABLE metrics SET (timescaledb.compress); +-- create chunk with some data and compress +INSERT INTO metrics SELECT '2000-01-01' FROM generate_series(1,10); +-- create custom compression job without recompress boolean +SELECT add_job('_timescaledb_functions.policy_compression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "compress_after": "@ 7 days"}')::jsonb, initial_start => '2000-01-01 00:00:00+00'::timestamptz) AS "JOB_COMPRESS" \gset +-- first call should compress +CALL run_job(:JOB_COMPRESS); +-- 2nd call should do nothing +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- do an INSERT so recompress has something to do +INSERT INTO metrics SELECT '2000-01-01'; +---- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +-- should recompress +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- disable recompress in compress job +SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','false'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS; + alter_job +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": false, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",) +(1 row) + +-- nothing to do +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- do an INSERT so recompress has something to do +INSERT INTO metrics SELECT '2000-01-01'; +---- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +-- still nothing to do since we disabled recompress +CALL run_job(:JOB_COMPRESS); +---- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +-- reenable recompress in compress job +SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','true'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS; + alter_job 
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": true, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",) +(1 row) + +-- should recompress now +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +SELECT delete_job(:JOB_COMPRESS); + delete_job +------------ + +(1 row) + +SELECT add_job('_timescaledb_functions.policy_recompression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "recompress_after": "@ 7 days", "maxchunks_to_compress": 1}')::jsonb) AS "JOB_RECOMPRESS" \gset +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +---- nothing to do yet +CALL run_job(:JOB_RECOMPRESS); +psql:include/recompress_basic.sql:189: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- create some work for recompress +INSERT INTO metrics SELECT '2000-01-01'; +-- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +CALL run_job(:JOB_RECOMPRESS); +-- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +SELECT delete_job(:JOB_RECOMPRESS); + delete_job +------------ + +(1 row) + +-- Teardown test +\c :TEST_DBNAME :ROLE_SUPERUSER +REVOKE CREATE ON SCHEMA public FROM NOLOGIN_ROLE; +DROP ROLE NOLOGIN_ROLE; diff --git a/tsl/test/expected/telemetry_stats-13.out b/tsl/test/expected/telemetry_stats-13.out new file mode 100644 index 00000000000..13d4ca32ce8 --- /dev/null +++ b/tsl/test/expected/telemetry_stats-13.out @@ -0,0 +1,748 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +--telemetry tests that require a community license +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- function call info size is too variable for this test, so disable it +SET timescaledb.telemetry_level='no_functions'; +SELECT setseed(1); + setseed +--------- + +(1 row) + +-- Create a materialized view from the telemetry report so that we +-- don't regenerate telemetry for every query. Filter heap_size for +-- materialized views since PG14 reports a different heap size for +-- them compared to earlier PG versions. 
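+-- For reference, the jsonb '#-' operator used in the view definition below deletes
+-- the value at the given text[] path; a minimal illustration (the example document is
+-- hypothetical, not taken from the actual report):
+--   SELECT '{"relations": {"materialized_views": {"heap_size": 8192, "toast_size": 0}}}'::jsonb
+--          #- '{relations,materialized_views,heap_size}';
+--   -- returns {"relations": {"materialized_views": {"toast_size": 0}}}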
+CREATE MATERIALIZED VIEW telemetry_report AS +SELECT (r #- '{relations,materialized_views,heap_size}') AS r +FROM get_telemetry_report() r; +CREATE VIEW relations AS +SELECT r -> 'relations' AS rels +FROM telemetry_report; +SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, + rels -> 'hypertables' -> 'num_relations' AS num_hypertables +FROM relations; + num_continuous_aggs | num_hypertables +---------------------+----------------- + 0 | 0 +(1 row) + +-- check telemetry picks up flagged content from metadata +SELECT r -> 'db_metadata' AS db_metadata +FROM telemetry_report; + db_metadata +------------- + {} +(1 row) + +-- check timescaledb_telemetry.cloud +SELECT r -> 'instance_metadata' AS instance_metadata +FROM telemetry_report r; + instance_metadata +------------------- + {"cloud": "ci"} +(1 row) + +CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); +CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); +CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); +CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); +CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE hyper (LIKE normal); +SELECT table_name FROM create_hypertable('hyper', 'time'); + table_name +------------ + hyper +(1 row) + +CREATE MATERIALIZED VIEW contagg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg" is already up-to-date +CREATE MATERIALIZED VIEW contagg_old +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg_old" is already up-to-date +-- Create another view (already have the "relations" view) +CREATE VIEW devices AS +SELECT DISTINCT ON (device) device +FROM hyper; +-- Show relations with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 0, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 0 + + }, + + "hypertables": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 8192, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, 
+ + "partitioned_tables": { + + "heap_size": 0, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "continuous_aggregates": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Insert data +INSERT INTO normal +SELECT t, ceil(random() * 10)::int, random() * 30 +FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; +INSERT INTO hyper +SELECT * FROM normal; +INSERT INTO part +SELECT * FROM normal; +CALL refresh_continuous_aggregate('contagg', NULL, NULL); +CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); +-- ANALYZE to get updated reltuples stats +ANALYZE normal, hyper, part; +SELECT count(c) FROM show_chunks('hyper') c; + count +------- + 9 +(1 row) + +SELECT count(c) FROM show_chunks('contagg') c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks('contagg_old') c; + count +------- + 2 +(1 row) + +-- Update and show the telemetry report +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + 
"compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 155648, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 16384, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 229376, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Actual row count should be the same as reltuples stats for all tables +SELECT (SELECT count(*) FROM normal) num_inserted_rows, + (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, + (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, + (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; + num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples +-------------------+------------------+-----------------+---------------- + 697 | 697 | 697 | 697 +(1 row) + +-- Add compression +ALTER TABLE hyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('hyper') c ORDER BY c LIMIT 4; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + 
_timescaledb_internal._hyper_1_4_chunk +(4 rows) + +ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); +NOTICE: defaulting compress_segmentby to device +NOTICE: defaulting compress_orderby to hour +SELECT compress_chunk(c) +FROM show_chunks('contagg') c ORDER BY c LIMIT 1; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_10_chunk +(1 row) + +-- Turn of real-time aggregation +ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); +ANALYZE normal, hyper, part; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 32768, + + "compression": { + + "compressed_heap_size": 32768, + + "compressed_row_count": 4, + + "compressed_toast_size": 32768, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 32768, + + "uncompressed_row_count": 284, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 65536, + + "num_compressed_hypertables": 1, + + "compressed_row_count_frozen_immediately": 4 + + }, + + "indexes_size": 122880, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 413 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 180224, + + "toast_size": 24576, + + "compression": { + + "compressed_heap_size": 40960, + + "compressed_row_count": 10, + + "num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920, + + "compressed_row_count_frozen_immediately": 10+ + }, + + "indexes_size": 180224, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 1 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + 
"uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- check telemetry for fixed schedule jobs works +create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_fixed'; +end +$$; +create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_drifting'; +end +$$; +-- before adding the jobs +select get_telemetry_report()->'num_user_defined_actions_fixed'; + ?column? +---------- + 0 +(1 row) + +select get_telemetry_report()->'num_user_defined_actions'; + ?column? +---------- + 0 +(1 row) + +select add_job('job_test_fixed', '1 week'); + add_job +--------- + 1000 +(1 row) + +select add_job('job_test_drifting', '1 week', fixed_schedule => false); + add_job +--------- + 1001 +(1 row) + +-- add continuous aggregate refresh policy for contagg +select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +-- add retention policy, fixed +select add_retention_policy('hyper', interval '1 year', initial_start => now()); + add_retention_policy +---------------------- + 1004 +(1 row) + +-- add compression policy +select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); + add_compression_policy +------------------------ + 1005 +(1 row) + +select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; + uda_fixed | uda_drifting +-----------+-------------- + 1 | 1 +(1 row) + +select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; + contagg_fixed | contagg_drifting +---------------+------------------ + 1 | 1 +(1 row) + +select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; + compress_fixed | retention_fixed +----------------+----------------- + 1 | 1 +(1 row) + +DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; +TRUNCATE _timescaledb_internal.job_errors; +-- create some "errors" for testing +INSERT INTO +_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name) +VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), +(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), +(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'), +(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'), +(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 
min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'), +-- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions +(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'); +-- create some errors for them +INSERT INTO +_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) +values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), +(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), +(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), +(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'), +(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'), +(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'), +(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'); +-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs +SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); + jsonb_pretty +---------------------------------------------- + { + + "policy_retention": { + + "P0001": 1 + + }, + + "policy_compression": { + + "JF009": 1 + + }, + + "user_defined_action": { + + "ABCDE": 1, + + "P0001": 2 + + }, + + "policy_refresh_continuous_aggregate": {+ + "P0001": 2 + + } + + } +(1 row) + +-- for job statistics, insert some records into bgw_job_stats +INSERT INTO _timescaledb_internal.bgw_job_stat +values +(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 
00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); +SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); + jsonb_pretty +------------------------------------------------ + { + + "policy_retention": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_compression": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "user_defined_action": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_refresh_continuous_aggregate": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + } + + } +(1 row) + +-- create nested continuous aggregates - copied from cagg_on_cagg_common +CREATE TABLE conditions ( + time timestamptz NOT NULL, + temperature int +); +SELECT create_hypertable('conditions', 'time'); + create_hypertable +------------------------- + (6,public,conditions,t) +(1 row) + +CREATE MATERIALIZED VIEW conditions_summary_hourly_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 hour', "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_daily_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 day', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_hourly_1 +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_weekly_3 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 week', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_daily_2 +GROUP BY 1 +WITH NO DATA; +SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); + jsonb_pretty +-------------- + 2 +(1 row) + +DROP VIEW relations; +DROP MATERIALIZED VIEW telemetry_report; diff --git a/tsl/test/expected/telemetry_stats-14.out b/tsl/test/expected/telemetry_stats-14.out new file mode 100644 index 00000000000..dbebefc4231 --- /dev/null +++ b/tsl/test/expected/telemetry_stats-14.out @@ -0,0 +1,748 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +--telemetry tests that require a community license +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- function call info size is too variable for this test, so disable it +SET timescaledb.telemetry_level='no_functions'; +SELECT setseed(1); + setseed +--------- + +(1 row) + +-- Create a materialized view from the telemetry report so that we +-- don't regenerate telemetry for every query. 
Filter heap_size for +-- materialized views since PG14 reports a different heap size for +-- them compared to earlier PG versions. +CREATE MATERIALIZED VIEW telemetry_report AS +SELECT (r #- '{relations,materialized_views,heap_size}') AS r +FROM get_telemetry_report() r; +CREATE VIEW relations AS +SELECT r -> 'relations' AS rels +FROM telemetry_report; +SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, + rels -> 'hypertables' -> 'num_relations' AS num_hypertables +FROM relations; + num_continuous_aggs | num_hypertables +---------------------+----------------- + 0 | 0 +(1 row) + +-- check telemetry picks up flagged content from metadata +SELECT r -> 'db_metadata' AS db_metadata +FROM telemetry_report; + db_metadata +------------- + {} +(1 row) + +-- check timescaledb_telemetry.cloud +SELECT r -> 'instance_metadata' AS instance_metadata +FROM telemetry_report r; + instance_metadata +------------------- + {"cloud": "ci"} +(1 row) + +CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); +CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); +CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); +CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); +CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE hyper (LIKE normal); +SELECT table_name FROM create_hypertable('hyper', 'time'); + table_name +------------ + hyper +(1 row) + +CREATE MATERIALIZED VIEW contagg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg" is already up-to-date +CREATE MATERIALIZED VIEW contagg_old +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg_old" is already up-to-date +-- Create another view (already have the "relations" view) +CREATE VIEW devices AS +SELECT DISTINCT ON (device) device +FROM hyper; +-- Show relations with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 0, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 0 + + }, + + "hypertables": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 8192, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + 
}, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 0, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "continuous_aggregates": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Insert data +INSERT INTO normal +SELECT t, ceil(random() * 10)::int, random() * 30 +FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; +INSERT INTO hyper +SELECT * FROM normal; +INSERT INTO part +SELECT * FROM normal; +CALL refresh_continuous_aggregate('contagg', NULL, NULL); +CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); +-- ANALYZE to get updated reltuples stats +ANALYZE normal, hyper, part; +SELECT count(c) FROM show_chunks('hyper') c; + count +------- + 9 +(1 row) + +SELECT count(c) FROM show_chunks('contagg') c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks('contagg_old') c; + count +------- + 2 +(1 row) + +-- Update and show the telemetry report +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + 
"compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 155648, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 16384, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 229376, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Actual row count should be the same as reltuples stats for all tables +SELECT (SELECT count(*) FROM normal) num_inserted_rows, + (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, + (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, + (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; + num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples +-------------------+------------------+-----------------+---------------- + 697 | 697 | 697 | 697 +(1 row) + +-- Add compression +ALTER TABLE hyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('hyper') c ORDER BY c LIMIT 4; + compress_chunk +---------------------------------------- + 
_timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + _timescaledb_internal._hyper_1_4_chunk +(4 rows) + +ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); +NOTICE: defaulting compress_segmentby to device +NOTICE: defaulting compress_orderby to hour +SELECT compress_chunk(c) +FROM show_chunks('contagg') c ORDER BY c LIMIT 1; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_10_chunk +(1 row) + +-- Turn of real-time aggregation +ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); +ANALYZE normal, hyper, part; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 106496, + + "toast_size": 32768, + + "compression": { + + "compressed_heap_size": 65536, + + "compressed_row_count": 4, + + "compressed_toast_size": 32768, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 32768, + + "uncompressed_row_count": 284, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 65536, + + "num_compressed_hypertables": 1, + + "compressed_row_count_frozen_immediately": 4 + + }, + + "indexes_size": 122880, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 413 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 24576, + + "compression": { + + "compressed_heap_size": 49152, + + "compressed_row_count": 10, + + "num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920, + + "compressed_row_count_frozen_immediately": 10+ + }, + + "indexes_size": 180224, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 1 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + 
"uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- check telemetry for fixed schedule jobs works +create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_fixed'; +end +$$; +create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_drifting'; +end +$$; +-- before adding the jobs +select get_telemetry_report()->'num_user_defined_actions_fixed'; + ?column? +---------- + 0 +(1 row) + +select get_telemetry_report()->'num_user_defined_actions'; + ?column? +---------- + 0 +(1 row) + +select add_job('job_test_fixed', '1 week'); + add_job +--------- + 1000 +(1 row) + +select add_job('job_test_drifting', '1 week', fixed_schedule => false); + add_job +--------- + 1001 +(1 row) + +-- add continuous aggregate refresh policy for contagg +select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +-- add retention policy, fixed +select add_retention_policy('hyper', interval '1 year', initial_start => now()); + add_retention_policy +---------------------- + 1004 +(1 row) + +-- add compression policy +select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); + add_compression_policy +------------------------ + 1005 +(1 row) + +select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; + uda_fixed | uda_drifting +-----------+-------------- + 1 | 1 +(1 row) + +select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; + contagg_fixed | contagg_drifting +---------------+------------------ + 1 | 1 +(1 row) + +select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; + compress_fixed | retention_fixed +----------------+----------------- + 1 | 1 +(1 row) + +DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; +TRUNCATE _timescaledb_internal.job_errors; +-- create some "errors" for testing +INSERT INTO +_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name) +VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), +(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), +(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'), +(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', 
'_timescaledb_functions', 'policy_retention'), +(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'), +-- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions +(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'); +-- create some errors for them +INSERT INTO +_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) +values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), +(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), +(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), +(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'), +(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'), +(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'), +(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'); +-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs +SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); + jsonb_pretty +---------------------------------------------- + { + + "policy_retention": { + + "P0001": 1 + + }, + + "policy_compression": { + + "JF009": 1 + + }, + + "user_defined_action": { + + "ABCDE": 1, + + "P0001": 2 + + }, + + "policy_refresh_continuous_aggregate": {+ + "P0001": 2 + + } + + } +(1 row) + +-- for job statistics, insert some records into bgw_job_stats +INSERT INTO _timescaledb_internal.bgw_job_stat +values +(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, 
'-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); +SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); + jsonb_pretty +------------------------------------------------ + { + + "policy_retention": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_compression": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "user_defined_action": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_refresh_continuous_aggregate": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + } + + } +(1 row) + +-- create nested continuous aggregates - copied from cagg_on_cagg_common +CREATE TABLE conditions ( + time timestamptz NOT NULL, + temperature int +); +SELECT create_hypertable('conditions', 'time'); + create_hypertable +------------------------- + (6,public,conditions,t) +(1 row) + +CREATE MATERIALIZED VIEW conditions_summary_hourly_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 hour', "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_daily_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 day', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_hourly_1 +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_weekly_3 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 week', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_daily_2 +GROUP BY 1 +WITH NO DATA; +SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); + jsonb_pretty +-------------- + 2 +(1 row) + +DROP VIEW relations; +DROP MATERIALIZED VIEW telemetry_report; diff --git a/tsl/test/expected/telemetry_stats-15.out b/tsl/test/expected/telemetry_stats-15.out new file mode 100644 index 00000000000..dbebefc4231 --- /dev/null +++ b/tsl/test/expected/telemetry_stats-15.out @@ -0,0 +1,748 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+--telemetry tests that require a community license +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- function call info size is too variable for this test, so disable it +SET timescaledb.telemetry_level='no_functions'; +SELECT setseed(1); + setseed +--------- + +(1 row) + +-- Create a materialized view from the telemetry report so that we +-- don't regenerate telemetry for every query. Filter heap_size for +-- materialized views since PG14 reports a different heap size for +-- them compared to earlier PG versions. +CREATE MATERIALIZED VIEW telemetry_report AS +SELECT (r #- '{relations,materialized_views,heap_size}') AS r +FROM get_telemetry_report() r; +CREATE VIEW relations AS +SELECT r -> 'relations' AS rels +FROM telemetry_report; +SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, + rels -> 'hypertables' -> 'num_relations' AS num_hypertables +FROM relations; + num_continuous_aggs | num_hypertables +---------------------+----------------- + 0 | 0 +(1 row) + +-- check telemetry picks up flagged content from metadata +SELECT r -> 'db_metadata' AS db_metadata +FROM telemetry_report; + db_metadata +------------- + {} +(1 row) + +-- check timescaledb_telemetry.cloud +SELECT r -> 'instance_metadata' AS instance_metadata +FROM telemetry_report r; + instance_metadata +------------------- + {"cloud": "ci"} +(1 row) + +CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); +CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); +CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); +CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); +CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE hyper (LIKE normal); +SELECT table_name FROM create_hypertable('hyper', 'time'); + table_name +------------ + hyper +(1 row) + +CREATE MATERIALIZED VIEW contagg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg" is already up-to-date +CREATE MATERIALIZED VIEW contagg_old +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg_old" is already up-to-date +-- Create another view (already have the "relations" view) +CREATE VIEW devices AS +SELECT DISTINCT ON (device) device +FROM hyper; +-- Show relations with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 0, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 0 + + }, + + "hypertables": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + 
"num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 8192, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 0, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "continuous_aggregates": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Insert data +INSERT INTO normal +SELECT t, ceil(random() * 10)::int, random() * 30 +FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; +INSERT INTO hyper +SELECT * FROM normal; +INSERT INTO part +SELECT * FROM normal; +CALL refresh_continuous_aggregate('contagg', NULL, NULL); +CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); +-- ANALYZE to get updated reltuples stats +ANALYZE normal, hyper, part; +SELECT count(c) FROM show_chunks('hyper') c; + count +------- + 9 +(1 row) + +SELECT count(c) FROM show_chunks('contagg') c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks('contagg_old') c; + count +------- + 2 +(1 row) + +-- Update and show the telemetry report +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations 
+---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 155648, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 16384, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 229376, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Actual row count should be the same as reltuples stats for all tables +SELECT (SELECT count(*) FROM normal) num_inserted_rows, + (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, + (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, + (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; + 
num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples +-------------------+------------------+-----------------+---------------- + 697 | 697 | 697 | 697 +(1 row) + +-- Add compression +ALTER TABLE hyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('hyper') c ORDER BY c LIMIT 4; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + _timescaledb_internal._hyper_1_4_chunk +(4 rows) + +ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); +NOTICE: defaulting compress_segmentby to device +NOTICE: defaulting compress_orderby to hour +SELECT compress_chunk(c) +FROM show_chunks('contagg') c ORDER BY c LIMIT 1; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_10_chunk +(1 row) + +-- Turn off real-time aggregation +ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); +ANALYZE normal, hyper, part; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 106496, + + "toast_size": 32768, + + "compression": { + + "compressed_heap_size": 65536, + + "compressed_row_count": 4, + + "compressed_toast_size": 32768, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 32768, + + "uncompressed_row_count": 284, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 65536, + + "num_compressed_hypertables": 1, + + "compressed_row_count_frozen_immediately": 4 + + }, + + "indexes_size": 122880, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 413 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 24576, + + "compression": { + + "compressed_heap_size": 49152, + + "compressed_row_count": 10, + + "num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920, + + "compressed_row_count_frozen_immediately": 10+ + }, + + "indexes_size": 180224, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 1 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + 
"compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- check telemetry for fixed schedule jobs works +create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_fixed'; +end +$$; +create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_drifting'; +end +$$; +-- before adding the jobs +select get_telemetry_report()->'num_user_defined_actions_fixed'; + ?column? +---------- + 0 +(1 row) + +select get_telemetry_report()->'num_user_defined_actions'; + ?column? +---------- + 0 +(1 row) + +select add_job('job_test_fixed', '1 week'); + add_job +--------- + 1000 +(1 row) + +select add_job('job_test_drifting', '1 week', fixed_schedule => false); + add_job +--------- + 1001 +(1 row) + +-- add continuous aggregate refresh policy for contagg +select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +-- add retention policy, fixed +select add_retention_policy('hyper', interval '1 year', initial_start => now()); + add_retention_policy +---------------------- + 1004 +(1 row) + +-- add compression policy +select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); + add_compression_policy +------------------------ + 1005 +(1 row) + +select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; + uda_fixed | uda_drifting +-----------+-------------- + 1 | 1 +(1 row) + +select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; + contagg_fixed | contagg_drifting +---------------+------------------ + 1 | 1 +(1 row) + +select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; + compress_fixed | retention_fixed +----------------+----------------- + 1 | 1 +(1 row) + +DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; +TRUNCATE _timescaledb_internal.job_errors; +-- create some "errors" for testing +INSERT INTO +_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name) +VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), 
+(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), +(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'), +(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'), +(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'), +-- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions +(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'); +-- create some errors for them +INSERT INTO +_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) +values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), +(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), +(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), +(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'), +(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'), +(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'), +(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'); +-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs +SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); + jsonb_pretty +---------------------------------------------- + { + + "policy_retention": { + + "P0001": 1 + + }, + + "policy_compression": { + + "JF009": 1 + + }, + + "user_defined_action": { + + "ABCDE": 1, + + "P0001": 2 + + }, + + "policy_refresh_continuous_aggregate": {+ + "P0001": 2 + + } + + } +(1 row) + +-- for job statistics, insert some records into bgw_job_stats +INSERT INTO _timescaledb_internal.bgw_job_stat +values +(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', 
interval '00:00:02', 0, 1, 0, 1, 0), +(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); +SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); + jsonb_pretty +------------------------------------------------ + { + + "policy_retention": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_compression": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "user_defined_action": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_refresh_continuous_aggregate": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + } + + } +(1 row) + +-- create nested continuous aggregates - copied from cagg_on_cagg_common +CREATE TABLE conditions ( + time timestamptz NOT NULL, + temperature int +); +SELECT create_hypertable('conditions', 'time'); + create_hypertable +------------------------- + (6,public,conditions,t) +(1 row) + +CREATE MATERIALIZED VIEW conditions_summary_hourly_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 hour', "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_daily_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 day', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_hourly_1 +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_weekly_3 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 week', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_daily_2 +GROUP BY 1 +WITH NO DATA; +SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); + jsonb_pretty +-------------- + 2 +(1 row) + +DROP VIEW relations; +DROP MATERIALIZED VIEW telemetry_report; diff --git a/tsl/test/expected/telemetry_stats-16.out b/tsl/test/expected/telemetry_stats-16.out new file mode 100644 index 00000000000..dbebefc4231 --- /dev/null +++ b/tsl/test/expected/telemetry_stats-16.out @@ -0,0 +1,748 @@ +-- This file and its contents are licensed under the Timescale License. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +--telemetry tests that require a community license +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- function call info size is too variable for this test, so disable it +SET timescaledb.telemetry_level='no_functions'; +SELECT setseed(1); + setseed +--------- + +(1 row) + +-- Create a materialized view from the telemetry report so that we +-- don't regenerate telemetry for every query. Filter heap_size for +-- materialized views since PG14 reports a different heap size for +-- them compared to earlier PG versions. +CREATE MATERIALIZED VIEW telemetry_report AS +SELECT (r #- '{relations,materialized_views,heap_size}') AS r +FROM get_telemetry_report() r; +CREATE VIEW relations AS +SELECT r -> 'relations' AS rels +FROM telemetry_report; +SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, + rels -> 'hypertables' -> 'num_relations' AS num_hypertables +FROM relations; + num_continuous_aggs | num_hypertables +---------------------+----------------- + 0 | 0 +(1 row) + +-- check telemetry picks up flagged content from metadata +SELECT r -> 'db_metadata' AS db_metadata +FROM telemetry_report; + db_metadata +------------- + {} +(1 row) + +-- check timescaledb_telemetry.cloud +SELECT r -> 'instance_metadata' AS instance_metadata +FROM telemetry_report r; + instance_metadata +------------------- + {"cloud": "ci"} +(1 row) + +CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); +CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); +CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); +CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); +CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE hyper (LIKE normal); +SELECT table_name FROM create_hypertable('hyper', 'time'); + table_name +------------ + hyper +(1 row) + +CREATE MATERIALIZED VIEW contagg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg" is already up-to-date +CREATE MATERIALIZED VIEW contagg_old +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg_old" is already up-to-date +-- Create another view (already have the "relations" view) +CREATE VIEW devices AS +SELECT DISTINCT ON (device) device +FROM hyper; +-- Show relations with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 0, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 0 + + }, + + "hypertables": { + + "heap_size": 0, + + "toast_size": 0, + + 
"compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 8192, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 0, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "continuous_aggregates": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Insert data +INSERT INTO normal +SELECT t, ceil(random() * 10)::int, random() * 30 +FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; +INSERT INTO hyper +SELECT * FROM normal; +INSERT INTO part +SELECT * FROM normal; +CALL refresh_continuous_aggregate('contagg', NULL, NULL); +CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); +-- ANALYZE to get updated reltuples stats +ANALYZE normal, hyper, part; +SELECT count(c) FROM show_chunks('hyper') c; + count +------- + 9 +(1 row) + +SELECT count(c) FROM show_chunks('contagg') c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks('contagg_old') c; + count +------- + 2 +(1 row) + +-- Update and show the telemetry report +REFRESH 
MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 155648, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 16384, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 229376, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Actual row count should be the same as reltuples stats for all tables +SELECT (SELECT count(*) FROM normal) num_inserted_rows, + (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, + (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) 
hyper_reltuples, + (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; + num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples +-------------------+------------------+-----------------+---------------- + 697 | 697 | 697 | 697 +(1 row) + +-- Add compression +ALTER TABLE hyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('hyper') c ORDER BY c LIMIT 4; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + _timescaledb_internal._hyper_1_4_chunk +(4 rows) + +ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); +NOTICE: defaulting compress_segmentby to device +NOTICE: defaulting compress_orderby to hour +SELECT compress_chunk(c) +FROM show_chunks('contagg') c ORDER BY c LIMIT 1; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_10_chunk +(1 row) + +-- Turn off real-time aggregation +ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); +ANALYZE normal, hyper, part; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 106496, + + "toast_size": 32768, + + "compression": { + + "compressed_heap_size": 65536, + + "compressed_row_count": 4, + + "compressed_toast_size": 32768, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 32768, + + "uncompressed_row_count": 284, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 65536, + + "num_compressed_hypertables": 1, + + "compressed_row_count_frozen_immediately": 4 + + }, + + "indexes_size": 122880, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 413 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 24576, + + "compression": { + + "compressed_heap_size": 49152, + + "compressed_row_count": 10, + + "num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920, + + "compressed_row_count_frozen_immediately": 10+ + }, + + "indexes_size": 180224, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 1 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + 
"uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- check telemetry for fixed schedule jobs works +create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_fixed'; +end +$$; +create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_drifting'; +end +$$; +-- before adding the jobs +select get_telemetry_report()->'num_user_defined_actions_fixed'; + ?column? +---------- + 0 +(1 row) + +select get_telemetry_report()->'num_user_defined_actions'; + ?column? +---------- + 0 +(1 row) + +select add_job('job_test_fixed', '1 week'); + add_job +--------- + 1000 +(1 row) + +select add_job('job_test_drifting', '1 week', fixed_schedule => false); + add_job +--------- + 1001 +(1 row) + +-- add continuous aggregate refresh policy for contagg +select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +-- add retention policy, fixed +select add_retention_policy('hyper', interval '1 year', initial_start => now()); + add_retention_policy +---------------------- + 1004 +(1 row) + +-- add compression policy +select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); + add_compression_policy +------------------------ + 1005 +(1 row) + +select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; + uda_fixed | uda_drifting +-----------+-------------- + 1 | 1 +(1 row) + +select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; + contagg_fixed | contagg_drifting +---------------+------------------ + 1 | 1 +(1 row) + +select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; + compress_fixed | retention_fixed +----------------+----------------- + 1 | 1 +(1 row) + +DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; +TRUNCATE _timescaledb_internal.job_errors; +-- create some "errors" for testing +INSERT INTO +_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name) +VALUES (2000, 
'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), +(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), +(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'), +(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'), +(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'), +-- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions +(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'); +-- create some errors for them +INSERT INTO +_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) +values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), +(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), +(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), +(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'), +(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'), +(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'), +(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'); +-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs +SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); + jsonb_pretty +---------------------------------------------- + { + + "policy_retention": { + + "P0001": 1 + + }, + + "policy_compression": { + + "JF009": 1 + + }, + + "user_defined_action": { + + "ABCDE": 1, + + "P0001": 2 + + }, + + "policy_refresh_continuous_aggregate": {+ + "P0001": 2 + + } + + } +(1 row) + +-- for job statistics, insert some records into bgw_job_stats +INSERT INTO _timescaledb_internal.bgw_job_stat +values +(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2002, '2040-01-01 00:00:00+00'::timestamptz, 
'2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); +SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); + jsonb_pretty +------------------------------------------------ + { + + "policy_retention": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_compression": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "user_defined_action": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_refresh_continuous_aggregate": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + } + + } +(1 row) + +-- create nested continuous aggregates - copied from cagg_on_cagg_common +CREATE TABLE conditions ( + time timestamptz NOT NULL, + temperature int +); +SELECT create_hypertable('conditions', 'time'); + create_hypertable +------------------------- + (6,public,conditions,t) +(1 row) + +CREATE MATERIALIZED VIEW conditions_summary_hourly_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 hour', "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_daily_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 day', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_hourly_1 +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_weekly_3 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 week', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_daily_2 +GROUP BY 1 +WITH NO DATA; +SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); + jsonb_pretty +-------------- + 2 +(1 row) + +DROP VIEW relations; +DROP MATERIALIZED VIEW telemetry_report; diff --git a/tsl/test/expected/telemetry_stats.out b/tsl/test/expected/telemetry_stats.out deleted file mode 100644 index 39f43f7e6eb..00000000000 --- a/tsl/test/expected/telemetry_stats.out +++ /dev/null @@ -1,736 +0,0 @@ 
--- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. ---telemetry tests that require a community license -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; --- function call info size is too variable for this test, so disable it -SET timescaledb.telemetry_level='no_functions'; -SELECT setseed(1); - setseed ---------- - -(1 row) - --- Create a materialized view from the telemetry report so that we --- don't regenerate telemetry for every query. Filter heap_size for --- materialized views since PG14 reports a different heap size for --- them compared to earlier PG versions. -CREATE MATERIALIZED VIEW telemetry_report AS -SELECT (r #- '{relations,materialized_views,heap_size}') AS r -FROM get_telemetry_report() r; -CREATE VIEW relations AS -SELECT r -> 'relations' AS rels -FROM telemetry_report; -SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, - rels -> 'hypertables' -> 'num_relations' AS num_hypertables -FROM relations; - num_continuous_aggs | num_hypertables ----------------------+----------------- - 0 | 0 -(1 row) - --- check telemetry picks up flagged content from metadata -SELECT r -> 'db_metadata' AS db_metadata -FROM telemetry_report; - db_metadata -------------- - {} -(1 row) - --- check timescaledb_telemetry.cloud -SELECT r -> 'instance_metadata' AS instance_metadata -FROM telemetry_report r; - instance_metadata -------------------- - {"cloud": "ci"} -(1 row) - -CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); -CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); -CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); -CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); -CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); -CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); -CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); -CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); -CREATE TABLE hyper (LIKE normal); -SELECT table_name FROM create_hypertable('hyper', 'time'); - table_name ------------- - hyper -(1 row) - -CREATE MATERIALIZED VIEW contagg -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - hyper -GROUP BY hour, device; -NOTICE: continuous aggregate "contagg" is already up-to-date -CREATE MATERIALIZED VIEW contagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - hyper -GROUP BY hour, device; -NOTICE: continuous aggregate "contagg_old" is already up-to-date --- Create another view (already have the "relations" view) -CREATE VIEW devices AS -SELECT DISTINCT ON (device) device -FROM hyper; --- Show relations with no data -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 0, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 0 + - }, + - 
"hypertables": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 8192, + - "num_children": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 0, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "continuous_aggregates": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "num_compressed_caggs": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- Insert data -INSERT INTO normal -SELECT t, ceil(random() * 10)::int, random() * 30 -FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; -INSERT INTO hyper -SELECT * FROM normal; -INSERT INTO part -SELECT * FROM normal; -CALL refresh_continuous_aggregate('contagg', NULL, NULL); -CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); --- ANALYZE to get updated reltuples stats -ANALYZE normal, hyper, part; -SELECT count(c) FROM show_chunks('hyper') c; - count -------- - 9 -(1 row) - -SELECT count(c) FROM show_chunks('contagg') c; - count -------- - 2 -(1 row) - -SELECT count(c) FROM show_chunks('contagg_old') c; - count -------- - 2 -(1 row) - --- Update and show the telemetry report -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT jsonb_pretty(rels) AS relations FROM relations; - relations 
------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 65536, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 697 + - }, + - "hypertables": { + - "heap_size": 73728, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 155648, + - "num_children": 9, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 98304, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "continuous_aggregates": { + - "heap_size": 188416, + - "toast_size": 16384, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "num_compressed_caggs": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0 + - }, + - "indexes_size": 229376, + - "num_children": 4, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- Actual row count should be the same as reltuples stats for all tables -SELECT (SELECT count(*) FROM normal) num_inserted_rows, - (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, - (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, - (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; - num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples --------------------+------------------+-----------------+---------------- - 697 | 697 | 697 | 697 -(1 row) - --- Add compression -ALTER 
TABLE hyper SET (timescaledb.compress); -SELECT compress_chunk(c) -FROM show_chunks('hyper') c ORDER BY c LIMIT 4; - compress_chunk ----------------------------------------- - _timescaledb_internal._hyper_1_1_chunk - _timescaledb_internal._hyper_1_2_chunk - _timescaledb_internal._hyper_1_3_chunk - _timescaledb_internal._hyper_1_4_chunk -(4 rows) - -ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); -NOTICE: defaulting compress_segmentby to device -NOTICE: defaulting compress_orderby to hour -SELECT compress_chunk(c) -FROM show_chunks('contagg') c ORDER BY c LIMIT 1; - compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_2_10_chunk -(1 row) - --- Turn of real-time aggregation -ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); -ANALYZE normal, hyper, part; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 65536, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 697 + - }, + - "hypertables": { + - "heap_size": 73728, + - "toast_size": 32768, + - "compression": { + - "compressed_heap_size": 32768, + - "compressed_row_count": 4, + - "compressed_toast_size": 32768, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 32768, + - "uncompressed_row_count": 284, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 65536, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 122880, + - "num_children": 9, + - "num_relations": 1, + - "num_reltuples": 413 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 98304, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "continuous_aggregates": { + - "heap_size": 180224, + - "toast_size": 24576, + - "compression": { + - "compressed_heap_size": 40960, + - "compressed_row_count": 10, + - "num_compressed_caggs": 1, + - "compressed_toast_size": 8192, + - "num_compressed_chunks": 1, + - "uncompressed_heap_size": 49152, + - "uncompressed_row_count": 452, + - "compressed_indexes_size": 16384, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 81920 + - }, + - "indexes_size": 180224, + - "num_children": 4, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 1 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - 
"uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- check telemetry for fixed schedule jobs works -create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ -begin -raise log 'this is job_test_fixed'; -end -$$; -create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ -begin -raise log 'this is job_test_drifting'; -end -$$; --- before adding the jobs -select get_telemetry_report()->'num_user_defined_actions_fixed'; - ?column? ----------- - 0 -(1 row) - -select get_telemetry_report()->'num_user_defined_actions'; - ?column? ----------- - 0 -(1 row) - -select add_job('job_test_fixed', '1 week'); - add_job ---------- - 1000 -(1 row) - -select add_job('job_test_drifting', '1 week', fixed_schedule => false); - add_job ---------- - 1001 -(1 row) - --- add continuous aggregate refresh policy for contagg -select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting - add_continuous_aggregate_policy ---------------------------------- - 1002 -(1 row) - -select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed - add_continuous_aggregate_policy ---------------------------------- - 1003 -(1 row) - --- add retention policy, fixed -select add_retention_policy('hyper', interval '1 year', initial_start => now()); - add_retention_policy ----------------------- - 1004 -(1 row) - --- add compression policy -select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); - add_compression_policy ------------------------- - 1005 -(1 row) - -select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; - uda_fixed | uda_drifting ------------+-------------- - 1 | 1 -(1 row) - -select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; - contagg_fixed | contagg_drifting ----------------+------------------ - 1 | 1 -(1 row) - -select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; - compress_fixed | retention_fixed -----------------+----------------- - 1 | 1 -(1 row) - -DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; -TRUNCATE _timescaledb_internal.job_errors; --- create some "errors" for testing -INSERT INTO -_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name) -VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), -(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), -(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'), -(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'), -(2004, 'Refresh Continuous 
Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'), --- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions -(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'); --- create some errors for them -INSERT INTO -_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) -values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), -(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), -(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), -(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'), -(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'), -(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'), -(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'); --- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs -SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); - jsonb_pretty ----------------------------------------------- - { + - "policy_retention": { + - "P0001": 1 + - }, + - "policy_compression": { + - "JF009": 1 + - }, + - "user_defined_action": { + - "ABCDE": 1, + - "P0001": 2 + - }, + - "policy_refresh_continuous_aggregate": {+ - "P0001": 2 + - } + - } -(1 row) - --- for job statistics, insert some records into bgw_job_stats -INSERT INTO _timescaledb_internal.bgw_job_stat -values -(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 
1, 0), -(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); -SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); - jsonb_pretty ------------------------------------------------- - { + - "policy_retention": { + - "total_runs": 1, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 1, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 2 secs",+ - "max_consecutive_failures": 1 + - }, + - "policy_compression": { + - "total_runs": 1, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 1, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 2 secs",+ - "max_consecutive_failures": 1 + - }, + - "user_defined_action": { + - "total_runs": 2, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 2, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 4 secs",+ - "max_consecutive_failures": 1 + - }, + - "policy_refresh_continuous_aggregate": { + - "total_runs": 2, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 2, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 4 secs",+ - "max_consecutive_failures": 1 + - } + - } -(1 row) - --- create nested continuous aggregates - copied from cagg_on_cagg_common -CREATE TABLE conditions ( - time timestamptz NOT NULL, - temperature int -); -SELECT create_hypertable('conditions', 'time'); - create_hypertable -------------------------- - (6,public,conditions,t) -(1 row) - -CREATE MATERIALIZED VIEW conditions_summary_hourly_1 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - time_bucket('1 hour', "time") AS bucket, - SUM(temperature) AS temperature -FROM conditions -GROUP BY 1 -WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_daily_2 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - time_bucket('1 day', "bucket") AS bucket, - SUM(temperature) AS temperature -FROM conditions_summary_hourly_1 -GROUP BY 1 -WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_weekly_3 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - time_bucket('1 week', "bucket") AS bucket, - SUM(temperature) AS temperature -FROM conditions_summary_daily_2 -GROUP BY 1 -WITH NO DATA; -SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); - jsonb_pretty --------------- - 2 -(1 row) - -DROP VIEW relations; -DROP MATERIALIZED VIEW telemetry_report; diff --git a/tsl/test/isolation/expected/compression_freeze.out b/tsl/test/isolation/expected/compression_freeze.out new file mode 100644 index 00000000000..8beda3f4b55 --- /dev/null +++ b/tsl/test/isolation/expected/compression_freeze.out @@ -0,0 +1,145 @@ +Parsed test spec with 2 sessions + +starting permutation: s1_select_count s2_select_count_and_stats +step s1_select_count: + SELECT count(*) FROM sensor_data; + +count +----- +16850 +(1 row) + +step s2_select_count_and_stats: + SELECT count(*) FROM sensor_data; + SELECT chunk_schema, chunk_name, compression_status FROM chunk_compression_stats('sensor_data') ORDER BY 1, 2, 3; + +count +----- +16850 +(1 row) + +chunk_schema |chunk_name |compression_status +---------------------+--------------------+------------------ 
+_timescaledb_internal|_hyper_X_X_chunk|Uncompressed +_timescaledb_internal|_hyper_X_X_chunk|Uncompressed +(2 rows) + + +starting permutation: s1_select_count s1_compress s1_select_count s2_select_count_and_stats +step s1_select_count: + SELECT count(*) FROM sensor_data; + +count +----- +16850 +(1 row) + +step s1_compress: + SELECT count(*) FROM (SELECT compress_chunk(i, if_not_compressed => true) FROM show_chunks('sensor_data') i) i; + +count +----- + 2 +(1 row) + +step s1_select_count: + SELECT count(*) FROM sensor_data; + +count +----- +16850 +(1 row) + +step s2_select_count_and_stats: + SELECT count(*) FROM sensor_data; + SELECT chunk_schema, chunk_name, compression_status FROM chunk_compression_stats('sensor_data') ORDER BY 1, 2, 3; + +count +----- +16850 +(1 row) + +chunk_schema |chunk_name |compression_status +---------------------+--------------------+------------------ +_timescaledb_internal|_hyper_X_X_chunk|Compressed +_timescaledb_internal|_hyper_X_X_chunk|Compressed +(2 rows) + + +starting permutation: s2_lock_compression s2_select_count_and_stats s1_compress s2_select_count_and_stats s2_unlock_compression s2_select_count_and_stats +step s2_lock_compression: + SELECT debug_waitpoint_enable('compression_done_before_truncate_uncompressed'); + +debug_waitpoint_enable +---------------------- + +(1 row) + +step s2_select_count_and_stats: + SELECT count(*) FROM sensor_data; + SELECT chunk_schema, chunk_name, compression_status FROM chunk_compression_stats('sensor_data') ORDER BY 1, 2, 3; + +count +----- +16850 +(1 row) + +chunk_schema |chunk_name |compression_status +---------------------+--------------------+------------------ +_timescaledb_internal|_hyper_X_X_chunk|Uncompressed +_timescaledb_internal|_hyper_X_X_chunk|Uncompressed +(2 rows) + +step s1_compress: + SELECT count(*) FROM (SELECT compress_chunk(i, if_not_compressed => true) FROM show_chunks('sensor_data') i) i; + +step s2_select_count_and_stats: + SELECT count(*) FROM sensor_data; + SELECT chunk_schema, chunk_name, compression_status FROM chunk_compression_stats('sensor_data') ORDER BY 1, 2, 3; + +count +----- +16850 +(1 row) + +chunk_schema |chunk_name |compression_status +---------------------+--------------------+------------------ +_timescaledb_internal|_hyper_X_X_chunk|Uncompressed +_timescaledb_internal|_hyper_X_X_chunk|Uncompressed +(2 rows) + +step s2_unlock_compression: + SELECT locktype, mode, granted, objid FROM pg_locks WHERE not granted AND (locktype = 'advisory' or relation::regclass::text LIKE '%chunk') ORDER BY relation, locktype, mode, granted; + SELECT debug_waitpoint_release('compression_done_before_truncate_uncompressed'); + +locktype|mode |granted| objid +--------+---------+-------+--------- +advisory|ShareLock|f |113732026 +(1 row) + +debug_waitpoint_release +----------------------- + +(1 row) + +step s1_compress: <... 
completed> +count +----- + 2 +(1 row) + +step s2_select_count_and_stats: + SELECT count(*) FROM sensor_data; + SELECT chunk_schema, chunk_name, compression_status FROM chunk_compression_stats('sensor_data') ORDER BY 1, 2, 3; + +count +----- +16850 +(1 row) + +chunk_schema |chunk_name |compression_status +---------------------+--------------------+------------------ +_timescaledb_internal|_hyper_X_X_chunk|Compressed +_timescaledb_internal|_hyper_X_X_chunk|Compressed +(2 rows) + diff --git a/tsl/test/isolation/specs/CMakeLists.txt b/tsl/test/isolation/specs/CMakeLists.txt index 6868629e72e..fb103b5001b 100644 --- a/tsl/test/isolation/specs/CMakeLists.txt +++ b/tsl/test/isolation/specs/CMakeLists.txt @@ -27,7 +27,7 @@ endif() if(CMAKE_BUILD_TYPE MATCHES Debug) list(APPEND TEST_TEMPLATES_MODULE ${TEST_TEMPLATES_MODULE_DEBUG}) - list(APPEND TEST_FILES compression_chunk_race.spec + list(APPEND TEST_FILES compression_chunk_race.spec compression_freeze.spec compression_merge_race.spec decompression_chunk_and_parallel_query_wo_idx.spec) if(PG_VERSION VERSION_GREATER_EQUAL "14.0") diff --git a/tsl/test/isolation/specs/compression_freeze.spec b/tsl/test/isolation/specs/compression_freeze.spec new file mode 100644 index 00000000000..618a5010d26 --- /dev/null +++ b/tsl/test/isolation/specs/compression_freeze.spec @@ -0,0 +1,68 @@ +# This file and its contents are licensed under the Timescale License. +# Please see the included NOTICE for copyright information and +# LICENSE-TIMESCALE for a copy of the license. + +### +# This test verifies that compressed and uncompressed data are never visible at the same time. Since +# we freeze the compressed tuples immediately, they become visible to all transactions +# that are running concurrently. However, a parallel transaction should not be able to +# see the compressed hypertable in the catalog and must therefore not read the data twice.
+### + +setup { + CREATE TABLE sensor_data ( + time timestamptz not null, + sensor_id integer not null, + cpu double precision null, + temperature double precision null); + + -- Create large chunks that take a long time to compress + SELECT FROM create_hypertable('sensor_data','time', chunk_time_interval => INTERVAL '14 days'); + + INSERT INTO sensor_data + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series('2022-01-01', '2022-01-15', INTERVAL '1 hour') AS g1(time), + generate_series(1, 50, 1) AS g2(sensor_id) + ORDER BY time; + + SELECT count(*) FROM sensor_data; + + ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_segmentby = 'sensor_id'); +} + +teardown { + DROP TABLE sensor_data; +} + +session "s1" +step "s1_compress" { + SELECT count(*) FROM (SELECT compress_chunk(i, if_not_compressed => true) FROM show_chunks('sensor_data') i) i; +} + +step "s1_select_count" { + SELECT count(*) FROM sensor_data; +} + +session "s2" +step "s2_select_count_and_stats" { + SELECT count(*) FROM sensor_data; + SELECT chunk_schema, chunk_name, compression_status FROM chunk_compression_stats('sensor_data') ORDER BY 1, 2, 3; +} + +step "s2_lock_compression" { + SELECT debug_waitpoint_enable('compression_done_before_truncate_uncompressed'); +} + +step "s2_unlock_compression" { + SELECT locktype, mode, granted, objid FROM pg_locks WHERE not granted AND (locktype = 'advisory' or relation::regclass::text LIKE '%chunk') ORDER BY relation, locktype, mode, granted; + SELECT debug_waitpoint_release('compression_done_before_truncate_uncompressed'); +} + +permutation "s1_select_count" "s2_select_count_and_stats" +permutation "s1_select_count" "s1_compress" "s1_select_count" "s2_select_count_and_stats" +permutation "s2_lock_compression" "s2_select_count_and_stats" "s1_compress" "s2_select_count_and_stats" "s2_unlock_compression" "s2_select_count_and_stats" diff --git a/tsl/test/sql/.gitignore b/tsl/test/sql/.gitignore index dd66d992c90..ff836c95834 100644 --- a/tsl/test/sql/.gitignore +++ b/tsl/test/sql/.gitignore @@ -1,4 +1,5 @@ /*.pgbinary +/bgw_custom-*.sql /cagg_bgw-*.sql /cagg_ddl-*.sql /cagg_errors_deprecated-*.sql @@ -8,6 +9,7 @@ /cagg_repair-*.sql /cagg_union_view-*.sql /cagg_usage-*.sql +/compression_bgw-*.sql /compression_errors-*.sql /compression_sorted_merge-*.sql /compression_permissions-*.sql @@ -29,4 +31,5 @@ /remote-copy-*sv /transparent_decompression-*.sql /transparent_decompression_ordered_index-*.sql +/telemetry_stats-*.sql /merge_append_partially_compressed-*.sql diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index a30fb89e934..18f7c623c1d 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -4,7 +4,6 @@ include(GenerateTestSchedule) # so unless you have a good reason, add new test files here. 
set(TEST_FILES agg_partials_pushdown.sql - bgw_custom.sql bgw_security.sql bgw_policy.sql cagg_errors.sql @@ -15,7 +14,6 @@ set(TEST_FILES cagg_watermark.sql compressed_collation.sql compression_create_compressed_table.sql - compression_bgw.sql compression_conflicts.sql compression_insert.sql compression_policy.sql @@ -84,9 +82,6 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) recompress_chunk_segmentwise.sql transparent_decompression_join_index.sql feature_flags.sql) - if(USE_TELEMETRY) - list(APPEND TEST_FILES telemetry_stats.sql) - endif() if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") list( @@ -175,6 +170,8 @@ if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") endif() set(TEST_TEMPLATES + bgw_custom.sql.in + compression_bgw.sql.in compression_sorted_merge.sql.in cagg_union_view.sql.in plan_skip_scan.sql.in @@ -201,6 +198,9 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) continuous_aggs.sql.in continuous_aggs_deprecated.sql.in deparse.sql.in) + if(USE_TELEMETRY) + list(APPEND TEST_TEMPLATES telemetry_stats.sql.in) + endif() if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") list( APPEND diff --git a/tsl/test/sql/bgw_custom.sql b/tsl/test/sql/bgw_custom.sql.in similarity index 100% rename from tsl/test/sql/bgw_custom.sql rename to tsl/test/sql/bgw_custom.sql.in diff --git a/tsl/test/sql/compression_bgw.sql b/tsl/test/sql/compression_bgw.sql.in similarity index 100% rename from tsl/test/sql/compression_bgw.sql rename to tsl/test/sql/compression_bgw.sql.in diff --git a/tsl/test/sql/telemetry_stats.sql b/tsl/test/sql/telemetry_stats.sql.in similarity index 100% rename from tsl/test/sql/telemetry_stats.sql rename to tsl/test/sql/telemetry_stats.sql.in diff --git a/tsl/test/src/test_compression.c b/tsl/test/src/test_compression.c index 39e86671b25..61ade7f4dff 100644 --- a/tsl/test/src/test_compression.c +++ b/tsl/test/src/test_compression.c @@ -715,7 +715,8 @@ ts_compress_table(PG_FUNCTION_ARGS) compress_chunk(in_table, out_table, (const ColumnCompressionInfo **) compression_info->data, - compression_info->num_elements); + compression_info->num_elements, + 0 /*insert options*/); PG_RETURN_VOID(); }
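A note on the insert_options argument that the last hunk threads through compress_chunk() (the test helper passes 0, i.e. no options): the purpose of a non-zero value is to let the compressed tuples be written already frozen, in the spirit of COPY FREEZE, which is safe only because the compressed chunk is created in the same transaction. The C sketch below is illustrative rather than the patch's implementation; the helper name store_compressed_tuple and the chunk_created_in_current_xact flag are assumptions, while heap_insert() and HEAP_INSERT_FROZEN are the standard PostgreSQL heap AM interface.

/*
 * Illustrative sketch only -- not the patch's implementation. It shows how an
 * insert-options flag such as the one added to compress_chunk() above can be
 * forwarded to PostgreSQL's heap AM so that compressed tuples are stored
 * already frozen. The function name and the boolean parameter are assumptions.
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/xact.h"
#include "utils/rel.h"

static void
store_compressed_tuple(Relation compressed_rel, HeapTuple compressed_tuple,
					   bool chunk_created_in_current_xact)
{
	/*
	 * Freezing on insert is only safe when the target relation was created in
	 * the current transaction: no concurrent snapshot can already contain it,
	 * so making the tuples visible to everyone cannot violate MVCC, and a
	 * later VACUUM no longer has to rewrite (and WAL-log) the tuples in order
	 * to freeze them.
	 */
	int			options = chunk_created_in_current_xact ? HEAP_INSERT_FROZEN : 0;

	heap_insert(compressed_rel,
				compressed_tuple,
				GetCurrentCommandId(true),
				options,
				NULL /* no BulkInsertState in this sketch */);
}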
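Similarly, the isolation test above relies on a debug waitpoint named 'compression_done_before_truncate_uncompressed' to hold the compressing transaction open after the frozen tuples have been written but before the uncompressed chunk is truncated, so that a second session can verify it still counts every row exactly once. The sketch below shows where such a waitpoint typically sits in the compression path, assuming TimescaleDB's debug-build DEBUG_WAITPOINT macro; the function name, its argument, and the header path are illustrative assumptions, and only the waitpoint name is taken from compression_freeze.spec.

/*
 * Illustrative sketch only: placement of the waitpoint used by
 * compression_freeze.spec. Everything except the waitpoint name and the
 * DEBUG_WAITPOINT macro is an assumption made for this example.
 */
#include "postgres.h"

#include "debug_point.h" /* assumed header providing DEBUG_WAITPOINT */

static void
compress_chunk_impl_sketch(Oid uncompressed_chunk_relid)
{
	/* ... all rows have been compressed and inserted (frozen) at this point ... */

	/*
	 * In debug builds this blocks until debug_waitpoint_release() is called,
	 * which lets a concurrent session inspect the catalog and the data while
	 * the compressing transaction is still open.
	 */
	DEBUG_WAITPOINT("compression_done_before_truncate_uncompressed");

	/* ... truncate the uncompressed chunk and update the catalog ... */
	(void) uncompressed_chunk_relid;
}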