From 88bd04062fb309e2d0f594500ea38611e8005c24 Mon Sep 17 00:00:00 2001 From: Jan Nidzwetzki Date: Thu, 19 Oct 2023 13:40:39 +0200 Subject: [PATCH] Added telemetry for frozen tuples --- sql/pre_install/tables.sql | 1 + sql/updates/latest-dev.sql | 50 + sql/updates/reverse-dev.sql | 50 + src/telemetry/stats.c | 1 + src/telemetry/stats.h | 1 + src/telemetry/telemetry.c | 4 + src/ts_catalog/catalog.h | 2 + tsl/src/chunk_copy.c | 1 + tsl/src/compression/api.c | 11 +- tsl/src/compression/compression.c | 6 + tsl/src/compression/compression.h | 1 + tsl/test/expected/bgw_custom-14.out | 12 + tsl/test/expected/bgw_custom-15.out | 12 + tsl/test/expected/bgw_custom-16.out | 1072 +++++++++++++++ tsl/test/expected/compression.out | 46 +- tsl/test/expected/compression_bgw-14.out | 3 +- tsl/test/expected/compression_bgw-15.out | 3 +- tsl/test/expected/compression_bgw-16.out | 657 ++++++++++ tsl/test/expected/telemetry_stats-13.out | 748 +++++++++++ tsl/test/expected/telemetry_stats-14.out | 748 +++++++++++ tsl/test/expected/telemetry_stats-15.out | 1157 +++++------------ tsl/test/expected/telemetry_stats-16.out | 748 +++++++++++ tsl/test/isolation/specs/CMakeLists.txt | 10 +- tsl/test/sql/.gitignore | 1 + tsl/test/sql/CMakeLists.txt | 6 +- ...metry_stats.sql => telemetry_stats.sql.in} | 0 26 files changed, 4509 insertions(+), 842 deletions(-) create mode 100644 tsl/test/expected/bgw_custom-16.out create mode 100644 tsl/test/expected/compression_bgw-16.out create mode 100644 tsl/test/expected/telemetry_stats-13.out create mode 100644 tsl/test/expected/telemetry_stats-14.out create mode 100644 tsl/test/expected/telemetry_stats-16.out rename tsl/test/sql/{telemetry_stats.sql => telemetry_stats.sql.in} (100%) diff --git a/sql/pre_install/tables.sql b/sql/pre_install/tables.sql index 5a2d36d0a11..85d45f1728f 100644 --- a/sql/pre_install/tables.sql +++ b/sql/pre_install/tables.sql @@ -495,6 +495,7 @@ CREATE TABLE _timescaledb_catalog.compression_chunk_size ( compressed_index_size bigint NOT NULL, numrows_pre_compression bigint, numrows_post_compression bigint, + numrows_frozen_immediately bigint, -- table constraints CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id), CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE, diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql index b8d310f00f3..90775394522 100644 --- a/sql/updates/latest-dev.sql +++ b/sql/updates/latest-dev.sql @@ -179,3 +179,53 @@ DROP TABLE _timescaledb_internal.tmp_chunk_seq_value; GRANT SELECT ON _timescaledb_catalog.chunk_id_seq TO PUBLIC; GRANT SELECT ON _timescaledb_catalog.chunk TO PUBLIC; -- end recreate _timescaledb_catalog.chunk table -- + +-- +-- Rebuild the catalog table `_timescaledb_catalog.compression_chunk_size` to +-- add new column `numrows_frozen_immediately` +-- +CREATE TABLE _timescaledb_internal.compression_chunk_size_tmp + AS SELECT * from _timescaledb_catalog.compression_chunk_size; + +-- Drop depended views +-- We assume that '_timescaledb_internal.compressed_chunk_stats' was already dropped in this update +-- (see above) + +-- Drop table +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_chunk_size; +DROP TABLE _timescaledb_catalog.compression_chunk_size; + +CREATE TABLE _timescaledb_catalog.compression_chunk_size ( + chunk_id integer NOT NULL, + compressed_chunk_id integer NOT NULL, + uncompressed_heap_size bigint NOT NULL, + uncompressed_toast_size bigint NOT NULL, + uncompressed_index_size 
bigint NOT NULL, + compressed_heap_size bigint NOT NULL, + compressed_toast_size bigint NOT NULL, + compressed_index_size bigint NOT NULL, + numrows_pre_compression bigint, + numrows_post_compression bigint, + numrows_frozen_immediately bigint, + -- table constraints + CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id), + CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE, + CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE +); + +INSERT INTO _timescaledb_catalog.compression_chunk_size +(chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, + uncompressed_index_size, compressed_heap_size, compressed_toast_size, + compressed_index_size, numrows_pre_compression, numrows_post_compression, numrows_frozen_immediately) +SELECT chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, + uncompressed_index_size, compressed_heap_size, compressed_toast_size, + compressed_index_size, numrows_pre_compression, numrows_post_compression, 0 +FROM _timescaledb_internal.compression_chunk_size_tmp; + +DROP TABLE _timescaledb_internal.compression_chunk_size_tmp; + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', ''); + +GRANT SELECT ON _timescaledb_catalog.compression_chunk_size TO PUBLIC; + +-- End modify `_timescaledb_catalog.compression_chunk_size` diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index 688fbe20eb2..b1c25e80c6a 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -124,3 +124,53 @@ GRANT SELECT ON _timescaledb_catalog.chunk_id_seq TO PUBLIC; GRANT SELECT ON _timescaledb_catalog.chunk TO PUBLIC; -- end recreate _timescaledb_catalog.chunk table -- + + +-- +-- Rebuild the catalog table `_timescaledb_catalog.compression_chunk_size` to +-- remove column `numrows_frozen_immediately` +-- +CREATE TABLE _timescaledb_internal.compression_chunk_size_tmp + AS SELECT * from _timescaledb_catalog.compression_chunk_size; + +-- Drop depended views +-- We assume that '_timescaledb_internal.compressed_chunk_stats' was already dropped in this update +-- (see above) + +-- Drop table +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_chunk_size; +DROP TABLE _timescaledb_catalog.compression_chunk_size; + +CREATE TABLE _timescaledb_catalog.compression_chunk_size ( + chunk_id integer NOT NULL, + compressed_chunk_id integer NOT NULL, + uncompressed_heap_size bigint NOT NULL, + uncompressed_toast_size bigint NOT NULL, + uncompressed_index_size bigint NOT NULL, + compressed_heap_size bigint NOT NULL, + compressed_toast_size bigint NOT NULL, + compressed_index_size bigint NOT NULL, + numrows_pre_compression bigint, + numrows_post_compression bigint, + -- table constraints + CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id), + CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE, + CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE +); + +INSERT INTO _timescaledb_catalog.compression_chunk_size +(chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, + uncompressed_index_size, compressed_heap_size, compressed_toast_size, + compressed_index_size, 
numrows_pre_compression, numrows_post_compression) +SELECT chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size, + uncompressed_index_size, compressed_heap_size, compressed_toast_size, + compressed_index_size, numrows_pre_compression, numrows_post_compression +FROM _timescaledb_internal.compression_chunk_size_tmp; + +DROP TABLE _timescaledb_internal.compression_chunk_size_tmp; + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', ''); + +GRANT SELECT ON _timescaledb_catalog.compression_chunk_size TO PUBLIC; + +-- End modify `_timescaledb_catalog.compression_chunk_size` diff --git a/src/telemetry/stats.c b/src/telemetry/stats.c index f6b3932c6f0..316e735bcd8 100644 --- a/src/telemetry/stats.c +++ b/src/telemetry/stats.c @@ -335,6 +335,7 @@ add_chunk_stats(HyperStats *stats, Form_pg_class class, const Chunk *chunk, stats->uncompressed_toast_size += fd_compr->uncompressed_toast_size; stats->uncompressed_row_count += fd_compr->numrows_pre_compression; stats->compressed_row_count += fd_compr->numrows_post_compression; + stats->compressed_row_frozen_immediately_count += fd_compr->numrows_frozen_immediately; /* Also add compressed sizes to total number for entire table */ stats->storage.relsize.heap_size += fd_compr->compressed_heap_size; diff --git a/src/telemetry/stats.h b/src/telemetry/stats.h index d765e906ad0..777779181ec 100644 --- a/src/telemetry/stats.h +++ b/src/telemetry/stats.h @@ -64,6 +64,7 @@ typedef struct HyperStats int64 compressed_indexes_size; int64 compressed_toast_size; int64 compressed_row_count; + int64 compressed_row_frozen_immediately_count; int64 uncompressed_heap_size; int64 uncompressed_indexes_size; int64 uncompressed_toast_size; diff --git a/src/telemetry/telemetry.c b/src/telemetry/telemetry.c index 8b0c099674a..16d270ab784 100644 --- a/src/telemetry/telemetry.c +++ b/src/telemetry/telemetry.c @@ -604,6 +604,7 @@ format_iso8601(Datum value) #define REQ_RELKIND_COMPRESSED_TOAST_SIZE "compressed_toast_size" #define REQ_RELKIND_COMPRESSED_INDEXES_SIZE "compressed_indexes_size" #define REQ_RELKIND_COMPRESSED_ROWCOUNT "compressed_row_count" +#define REQ_RELKIND_COMPRESSED_ROWCOUNT_FROZEN_IMMEDIATELY "compressed_row_count_frozen_immediately" #define REQ_RELKIND_CAGG_ON_DISTRIBUTED_HYPERTABLE_COUNT "num_caggs_on_distributed_hypertables" #define REQ_RELKIND_CAGG_USES_REAL_TIME_AGGREGATION_COUNT "num_caggs_using_real_time_aggregation" @@ -639,6 +640,9 @@ add_compression_stats_object(JsonbParseState *parse_state, StatsRelType reltype, ts_jsonb_add_int64(parse_state, REQ_RELKIND_COMPRESSED_INDEXES_SIZE, hs->compressed_indexes_size); + ts_jsonb_add_int64(parse_state, + REQ_RELKIND_COMPRESSED_ROWCOUNT_FROZEN_IMMEDIATELY, + hs->compressed_row_frozen_immediately_count); ts_jsonb_add_int64(parse_state, REQ_RELKIND_UNCOMPRESSED_ROWCOUNT, hs->uncompressed_row_count); ts_jsonb_add_int64(parse_state, REQ_RELKIND_UNCOMPRESSED_HEAP_SIZE, hs->uncompressed_heap_size); ts_jsonb_add_int64(parse_state, diff --git a/src/ts_catalog/catalog.h b/src/ts_catalog/catalog.h index c7c683e22b8..aa91278579b 100644 --- a/src/ts_catalog/catalog.h +++ b/src/ts_catalog/catalog.h @@ -1289,6 +1289,7 @@ typedef enum Anum_compression_chunk_size Anum_compression_chunk_size_compressed_index_size, Anum_compression_chunk_size_numrows_pre_compression, Anum_compression_chunk_size_numrows_post_compression, + Anum_compression_chunk_size_numrows_frozen_immediately, _Anum_compression_chunk_size_max, } Anum_compression_chunk_size; @@ -1306,6 +1307,7 @@ 
typedef struct FormData_compression_chunk_size int64 compressed_index_size; int64 numrows_pre_compression; int64 numrows_post_compression; + int64 numrows_frozen_immediately; } FormData_compression_chunk_size; typedef FormData_compression_chunk_size *Form_compression_chunk_size; diff --git a/tsl/src/chunk_copy.c b/tsl/src/chunk_copy.c index ae0ffcdee0e..b81107347ae 100644 --- a/tsl/src/chunk_copy.c +++ b/tsl/src/chunk_copy.c @@ -549,6 +549,7 @@ chunk_copy_get_source_compressed_chunk_stats(ChunkCopy *cc) cc->fd_ccs.compressed_index_size = atoll(PQgetvalue(res, 0, 5)); cc->fd_ccs.numrows_pre_compression = atoll(PQgetvalue(res, 0, 6)); cc->fd_ccs.numrows_post_compression = atoll(PQgetvalue(res, 0, 7)); + cc->fd_ccs.numrows_frozen_immediately = 0; ts_dist_cmd_close_response(dist_res); } diff --git a/tsl/src/compression/api.c b/tsl/src/compression/api.c index 22988863b01..0914d08de18 100644 --- a/tsl/src/compression/api.c +++ b/tsl/src/compression/api.c @@ -58,7 +58,8 @@ typedef struct CompressChunkCxt static void compression_chunk_size_catalog_insert(int32 src_chunk_id, const RelationSize *src_size, int32 compress_chunk_id, const RelationSize *compress_size, - int64 rowcnt_pre_compression, int64 rowcnt_post_compression) + int64 rowcnt_pre_compression, int64 rowcnt_post_compression, + int64 rowcnt_frozen) { Catalog *catalog = ts_catalog_get(); Relation rel; @@ -93,6 +94,8 @@ compression_chunk_size_catalog_insert(int32 src_chunk_id, const RelationSize *sr Int64GetDatum(rowcnt_pre_compression); values[AttrNumberGetAttrOffset(Anum_compression_chunk_size_numrows_post_compression)] = Int64GetDatum(rowcnt_post_compression); + values[AttrNumberGetAttrOffset(Anum_compression_chunk_size_numrows_frozen_immediately)] = + Int64GetDatum(rowcnt_frozen); ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx); ts_catalog_insert_values(rel, desc, values, nulls); @@ -536,7 +539,8 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid) compress_ht_chunk->fd.id, &after_size, cstat.rowcnt_pre_compression, - cstat.rowcnt_post_compression); + cstat.rowcnt_post_compression, + cstat.rowcnt_frozen); /* Copy chunk constraints (including fkey) to compressed chunk. 
* Do this after compressing the chunk to avoid holding strong, unnecessary locks on the @@ -833,7 +837,8 @@ tsl_create_compressed_chunk(PG_FUNCTION_ARGS) compress_ht_chunk->fd.id, &compressed_size, numrows_pre_compression, - numrows_post_compression); + numrows_post_compression, + 0); chunk_was_compressed = ts_chunk_is_compressed(cxt.srcht_chunk); ts_chunk_set_compressed_chunk(cxt.srcht_chunk, compress_ht_chunk->fd.id); diff --git a/tsl/src/compression/compression.c b/tsl/src/compression/compression.c index 1e31155df39..c786cc6cbcc 100644 --- a/tsl/src/compression/compression.c +++ b/tsl/src/compression/compression.c @@ -451,6 +451,12 @@ compress_chunk(Oid in_table, Oid out_table, const ColumnCompressionInfo **column table_close(in_rel, NoLock); cstat.rowcnt_pre_compression = row_compressor.rowcnt_pre_compression; cstat.rowcnt_post_compression = row_compressor.num_compressed_rows; + + if ((insert_options & HEAP_INSERT_FROZEN) == HEAP_INSERT_FROZEN) + cstat.rowcnt_frozen = row_compressor.num_compressed_rows; + else + cstat.rowcnt_frozen = 0; + return cstat; } diff --git a/tsl/src/compression/compression.h b/tsl/src/compression/compression.h index ad741722b0d..1ab39075484 100644 --- a/tsl/src/compression/compression.h +++ b/tsl/src/compression/compression.h @@ -201,6 +201,7 @@ typedef struct CompressionStats { int64 rowcnt_pre_compression; int64 rowcnt_post_compression; + int64 rowcnt_frozen; } CompressionStats; typedef struct PerColumn diff --git a/tsl/test/expected/bgw_custom-14.out b/tsl/test/expected/bgw_custom-14.out index 49559a248a4..6f308e7d361 100644 --- a/tsl/test/expected/bgw_custom-14.out +++ b/tsl/test/expected/bgw_custom-14.out @@ -972,7 +972,19 @@ SELECT count(*) = 0 (1 row) -- cleanup +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + DROP TABLE sensor_data; +SELECT _timescaledb_functions.restart_background_workers(); + restart_background_workers +---------------------------- + t +(1 row) + -- Github issue #5537 -- Proc that waits until the given job enters the expected state CREATE OR REPLACE PROCEDURE wait_for_job_status(job_param_id INTEGER, expected_status TEXT, spins INTEGER=:TEST_SPINWAIT_ITERS) diff --git a/tsl/test/expected/bgw_custom-15.out b/tsl/test/expected/bgw_custom-15.out index 49559a248a4..6f308e7d361 100644 --- a/tsl/test/expected/bgw_custom-15.out +++ b/tsl/test/expected/bgw_custom-15.out @@ -972,7 +972,19 @@ SELECT count(*) = 0 (1 row) -- cleanup +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + DROP TABLE sensor_data; +SELECT _timescaledb_functions.restart_background_workers(); + restart_background_workers +---------------------------- + t +(1 row) + -- Github issue #5537 -- Proc that waits until the given job enters the expected state CREATE OR REPLACE PROCEDURE wait_for_job_status(job_param_id INTEGER, expected_status TEXT, spins INTEGER=:TEST_SPINWAIT_ITERS) diff --git a/tsl/test/expected/bgw_custom-16.out b/tsl/test/expected/bgw_custom-16.out new file mode 100644 index 00000000000..6f308e7d361 --- /dev/null +++ b/tsl/test/expected/bgw_custom-16.out @@ -0,0 +1,1072 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE TABLE custom_log(job_id int, args jsonb, extra text, runner NAME DEFAULT CURRENT_ROLE); +CREATE OR REPLACE FUNCTION custom_func(jobid int, args jsonb) RETURNS VOID LANGUAGE SQL AS +$$ + INSERT INTO custom_log VALUES($1, $2, 'custom_func'); +$$; +CREATE OR REPLACE FUNCTION custom_func_definer(jobid int, args jsonb) RETURNS VOID LANGUAGE SQL AS +$$ + INSERT INTO custom_log VALUES($1, $2, 'security definer'); +$$ SECURITY DEFINER; +CREATE OR REPLACE PROCEDURE custom_proc(job_id int, args jsonb) LANGUAGE SQL AS +$$ + INSERT INTO custom_log VALUES($1, $2, 'custom_proc'); +$$; +-- procedure with transaction handling +CREATE OR REPLACE PROCEDURE custom_proc2(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + INSERT INTO custom_log VALUES($1, $2, 'custom_proc2 1 COMMIT ' || (args->>'type')); + COMMIT; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc2 2 ROLLBACK ' || (args->>'type')); + ROLLBACK; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc2 3 COMMIT ' || (args->>'type')); + COMMIT; +END +$$; +\set ON_ERROR_STOP 0 +-- test bad input +SELECT add_job(NULL, '1h'); +ERROR: function or procedure cannot be NULL +SELECT add_job(0, '1h'); +ERROR: function or procedure with OID 0 does not exist +-- this will return an error about Oid 4294967295 +-- while regproc is unsigned int postgres has an implicit cast from int to regproc +SELECT add_job(-1, '1h'); +ERROR: function or procedure with OID 4294967295 does not exist +SELECT add_job('invalid_func', '1h'); +ERROR: function "invalid_func" does not exist at character 16 +SELECT add_job('custom_func', NULL); +ERROR: schedule interval cannot be NULL +SELECT add_job('custom_func', 'invalid interval'); +ERROR: invalid input syntax for type interval: "invalid interval" at character 31 +\set ON_ERROR_STOP 1 +select '2000-01-01 00:00:00+00' as time_zero \gset +SELECT add_job('custom_func','1h', config:='{"type":"function"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1000 +(1 row) + +SELECT add_job('custom_proc','1h', config:='{"type":"procedure"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1001 +(1 row) + +SELECT add_job('custom_proc2','1h', config:= '{"type":"procedure"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1002 +(1 row) + +SELECT add_job('custom_func', '1h', config:='{"type":"function"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1003 +(1 row) + +SELECT add_job('custom_func_definer', '1h', config:='{"type":"function"}'::jsonb, initial_start => :'time_zero'::TIMESTAMPTZ); + add_job +--------- + 1004 +(1 row) + +-- exclude the telemetry[1] and job error retention[2] jobs +-- job 2 may have already run which will set its next_start field thus making the test flaky +SELECT * FROM timescaledb_information.jobs WHERE job_id NOT IN (1,2) ORDER BY 1; + job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name +--------+----------------------------+-------------------+-------------+-------------+--------------+-------------+---------------------+-------------------+-----------+----------------+-----------------------+------------------------------+------------------------------+-------------------+-----------------+--------------+------------ + 1000 | User-Defined Action [1000] | @ 1 hour | @ 0 | -1 | @ 5 mins | public 
| custom_func | default_perm_user | t | t | {"type": "function"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1001 | User-Defined Action [1001] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_proc | default_perm_user | t | t | {"type": "procedure"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1002 | User-Defined Action [1002] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_proc2 | default_perm_user | t | t | {"type": "procedure"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1003 | User-Defined Action [1003] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_func | default_perm_user | t | t | {"type": "function"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | + 1004 | User-Defined Action [1004] | @ 1 hour | @ 0 | -1 | @ 5 mins | public | custom_func_definer | default_perm_user | t | t | {"type": "function"} | Fri Dec 31 16:00:00 1999 PST | Fri Dec 31 16:00:00 1999 PST | | | | +(5 rows) + +SELECT count(*) FROM _timescaledb_config.bgw_job WHERE config->>'type' IN ('procedure', 'function'); + count +------- + 5 +(1 row) + +\set ON_ERROR_STOP 0 +-- test bad input +CALL run_job(NULL); +ERROR: job ID cannot be NULL +CALL run_job(-1); +ERROR: job -1 not found +\set ON_ERROR_STOP 1 +CALL run_job(1000); +CALL run_job(1001); +CALL run_job(1002); +CALL run_job(1003); +CALL run_job(1004); +SELECT * FROM custom_log ORDER BY job_id, extra; + job_id | args | extra | runner +--------+-----------------------+---------------------------------+------------------- + 1000 | {"type": "function"} | custom_func | default_perm_user + 1001 | {"type": "procedure"} | custom_proc | default_perm_user + 1002 | {"type": "procedure"} | custom_proc2 1 COMMIT procedure | default_perm_user + 1002 | {"type": "procedure"} | custom_proc2 3 COMMIT procedure | default_perm_user + 1003 | {"type": "function"} | custom_func | default_perm_user + 1004 | {"type": "function"} | security definer | default_perm_user +(6 rows) + +\set ON_ERROR_STOP 0 +-- test bad input +SELECT delete_job(NULL); + delete_job +------------ + +(1 row) + +SELECT delete_job(-1); +ERROR: job -1 not found +\set ON_ERROR_STOP 1 +-- We keep job 1000 for some additional checks. 
+SELECT delete_job(1001); + delete_job +------------ + +(1 row) + +SELECT delete_job(1002); + delete_job +------------ + +(1 row) + +SELECT delete_job(1003); + delete_job +------------ + +(1 row) + +SELECT delete_job(1004); + delete_job +------------ + +(1 row) + +-- check jobs got removed +SELECT count(*) FROM timescaledb_information.jobs WHERE job_id >= 1001; + count +------- + 0 +(1 row) + +\c :TEST_DBNAME :ROLE_SUPERUSER +\set ON_ERROR_STOP 0 +-- test bad input +SELECT alter_job(NULL, if_exists => false); +ERROR: job ID cannot be NULL +SELECT alter_job(-1, if_exists => false); +ERROR: job -1 not found +\set ON_ERROR_STOP 1 +-- test bad input but don't fail +SELECT alter_job(NULL, if_exists => true); +NOTICE: job 0 not found, skipping + alter_job +----------- + +(1 row) + +SELECT alter_job(-1, if_exists => true); +NOTICE: job -1 not found, skipping + alter_job +----------- + +(1 row) + +-- test altering job with NULL config +SELECT job_id FROM alter_job(1000,scheduled:=false); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+---------------------- + f | {"type": "function"} +(1 row) + +-- test updating job settings +SELECT job_id FROM alter_job(1000,config:='{"test":"test"}'); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+------------------ + f | {"test": "test"} +(1 row) + +SELECT job_id FROM alter_job(1000,scheduled:=true); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+------------------ + t | {"test": "test"} +(1 row) + +SELECT job_id FROM alter_job(1000,scheduled:=false); + job_id +-------- + 1000 +(1 row) + +SELECT scheduled, config FROM timescaledb_information.jobs WHERE job_id = 1000; + scheduled | config +-----------+------------------ + f | {"test": "test"} +(1 row) + +-- Done with job 1000 now, so remove it. 
+SELECT delete_job(1000); + delete_job +------------ + +(1 row) + +--test for #2793 +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +-- background workers are disabled, so the job will not run -- +SELECT add_job( proc=>'custom_func', + schedule_interval=>'1h', initial_start =>'2018-01-01 10:00:00-05') AS job_id_1 \gset +SELECT job_id, next_start, scheduled, schedule_interval +FROM timescaledb_information.jobs WHERE job_id > 1000; + job_id | next_start | scheduled | schedule_interval +--------+------------------------------+-----------+------------------- + 1005 | Mon Jan 01 07:00:00 2018 PST | t | @ 1 hour +(1 row) + +\x +SELECT * FROM timescaledb_information.job_stats WHERE job_id > 1000; +-[ RECORD 1 ]----------+----------------------------- +hypertable_schema | +hypertable_name | +job_id | 1005 +last_run_started_at | -infinity +last_successful_finish | -infinity +last_run_status | +job_status | Scheduled +last_run_duration | +next_start | Mon Jan 01 07:00:00 2018 PST +total_runs | 0 +total_successes | 0 +total_failures | 0 + +\x +SELECT delete_job(:job_id_1); + delete_job +------------ + +(1 row) + +-- tests for #3545 +CREATE FUNCTION wait_for_job_to_run(job_param_id INTEGER, expected_runs INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + r RECORD; +BEGIN + FOR i in 1..spins + LOOP + SELECT total_successes, total_failures FROM _timescaledb_internal.bgw_job_stat WHERE job_id=job_param_id INTO r; + IF (r.total_failures > 0) THEN + RAISE INFO 'wait_for_job_to_run: job execution failed'; + RETURN false; + ELSEIF (r.total_successes = expected_runs) THEN + RETURN true; + ELSEIF (r.total_successes > expected_runs) THEN + RAISE 'num_runs > expected'; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RAISE INFO 'wait_for_job_to_run: timeout after % tries', spins; + RETURN false; +END +$BODY$; +TRUNCATE custom_log; +-- Nested procedure call +CREATE OR REPLACE PROCEDURE custom_proc_nested(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + INSERT INTO custom_log VALUES($1, $2, 'custom_proc_nested 1 COMMIT'); + COMMIT; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc_nested 2 ROLLBACK'); + ROLLBACK; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc_nested 3 COMMIT'); + COMMIT; +END +$$; +CREATE OR REPLACE PROCEDURE custom_proc3(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + CALL custom_proc_nested(job_id, args); +END +$$; +CREATE OR REPLACE PROCEDURE custom_proc4(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + INSERT INTO custom_log VALUES($1, $2, 'custom_proc4 1 COMMIT'); + COMMIT; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc4 2 ROLLBACK'); + ROLLBACK; + RAISE EXCEPTION 'forced exception'; + INSERT INTO custom_log VALUES($1, $2, 'custom_proc4 3 ABORT'); + COMMIT; +END +$$; +CREATE OR REPLACE PROCEDURE custom_proc5(job_id int, args jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + CALL refresh_continuous_aggregate('conditions_summary_daily', '2021-08-01 00:00', '2021-08-31 00:00'); +END +$$; +-- Remove any default jobs, e.g., telemetry +\c :TEST_DBNAME :ROLE_SUPERUSER +TRUNCATE _timescaledb_config.bgw_job RESTART IDENTITY CASCADE; +NOTICE: truncate cascades to table "bgw_job_stat" +NOTICE: truncate cascades to table "bgw_policy_chunk_stats" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT add_job('custom_proc2', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_1 \gset +SELECT add_job('custom_proc3', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_2 \gset +\c 
:TEST_DBNAME :ROLE_SUPERUSER +-- Start Background Workers +SELECT _timescaledb_functions.start_background_workers(); + start_background_workers +-------------------------- + t +(1 row) + +-- Wait for jobs +SELECT wait_for_job_to_run(:job_id_1, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT wait_for_job_to_run(:job_id_2, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +-- Check results +SELECT * FROM custom_log ORDER BY job_id, extra; + job_id | args | extra | runner +--------+-----------------------+---------------------------------+------------------- + 1000 | {"type": "procedure"} | custom_proc2 1 COMMIT procedure | default_perm_user + 1000 | {"type": "procedure"} | custom_proc2 3 COMMIT procedure | default_perm_user + 1001 | {"type": "procedure"} | custom_proc_nested 1 COMMIT | default_perm_user + 1001 | {"type": "procedure"} | custom_proc_nested 3 COMMIT | default_perm_user +(4 rows) + +-- Delete previous jobs +SELECT delete_job(:job_id_1); + delete_job +------------ + +(1 row) + +SELECT delete_job(:job_id_2); + delete_job +------------ + +(1 row) + +TRUNCATE custom_log; +-- Forced Exception +SELECT add_job('custom_proc4', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_3 \gset +SELECT wait_for_job_to_run(:job_id_3, 1); +INFO: wait_for_job_to_run: job execution failed + wait_for_job_to_run +--------------------- + f +(1 row) + +-- Check results +SELECT * FROM custom_log ORDER BY job_id, extra; + job_id | args | extra | runner +--------+-----------------------+-----------------------+------------ + 1002 | {"type": "procedure"} | custom_proc4 1 COMMIT | super_user +(1 row) + +-- Delete previous jobs +SELECT delete_job(:job_id_3); + delete_job +------------ + +(1 row) + +CREATE TABLE conditions ( + time TIMESTAMP NOT NULL, + location TEXT NOT NULL, + location2 char(10) NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL +) WITH (autovacuum_enabled = FALSE); +SELECT create_hypertable('conditions', 'time', chunk_time_interval := '15 days'::interval); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_hypertable +------------------------- + (1,public,conditions,t) +(1 row) + +ALTER TABLE conditions + SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'location', + timescaledb.compress_orderby = 'time' +); +INSERT INTO conditions +SELECT generate_series('2021-08-01 00:00'::timestamp, '2021-08-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +-- Chunk compress stats +SELECT * FROM _timescaledb_internal.compressed_chunk_stats ORDER BY chunk_name; + hypertable_schema | hypertable_name | chunk_schema | chunk_name | compression_status | uncompressed_heap_size | uncompressed_index_size | uncompressed_toast_size | uncompressed_total_size | compressed_heap_size | compressed_index_size | compressed_toast_size | compressed_total_size +-------------------+-----------------+-----------------------+------------------+--------------------+------------------------+-------------------------+-------------------------+-------------------------+----------------------+-----------------------+-----------------------+----------------------- + public | conditions | _timescaledb_internal | _hyper_1_1_chunk | Uncompressed | | | | | | | | + public | conditions | _timescaledb_internal | _hyper_1_2_chunk | Uncompressed | | | | | | | | + public | conditions | _timescaledb_internal | _hyper_1_3_chunk | Uncompressed | | | | | | | | +(3 rows) + 
+-- Compression policy +SELECT add_compression_policy('conditions', interval '1 day') AS job_id_4 \gset +SELECT wait_for_job_to_run(:job_id_4, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +-- Chunk compress stats +SELECT * FROM _timescaledb_internal.compressed_chunk_stats ORDER BY chunk_name; + hypertable_schema | hypertable_name | chunk_schema | chunk_name | compression_status | uncompressed_heap_size | uncompressed_index_size | uncompressed_toast_size | uncompressed_total_size | compressed_heap_size | compressed_index_size | compressed_toast_size | compressed_total_size +-------------------+-----------------+-----------------------+------------------+--------------------+------------------------+-------------------------+-------------------------+-------------------------+----------------------+-----------------------+-----------------------+----------------------- + public | conditions | _timescaledb_internal | _hyper_1_1_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 16384 | 16384 | 8192 | 40960 + public | conditions | _timescaledb_internal | _hyper_1_2_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 16384 | 16384 | 8192 | 40960 + public | conditions | _timescaledb_internal | _hyper_1_3_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 16384 | 16384 | 8192 | 40960 +(3 rows) + +--TEST compression job after inserting data into previously compressed chunk +INSERT INTO conditions +SELECT generate_series('2021-08-01 00:00'::timestamp, '2021-08-31 00:00'::timestamp, '1 day'), 'NYC', 'nycity', 40, 40; +SELECT id, table_name, status from _timescaledb_catalog.chunk +where hypertable_id = (select id from _timescaledb_catalog.hypertable + where table_name = 'conditions') +order by id; + id | table_name | status +----+------------------+-------- + 1 | _hyper_1_1_chunk | 9 + 2 | _hyper_1_2_chunk | 9 + 3 | _hyper_1_3_chunk | 9 +(3 rows) + +--running job second time, wait for it to complete +select t.schedule_interval FROM alter_job(:job_id_4, next_start=> now() ) t; + schedule_interval +------------------- + @ 12 hours +(1 row) + +SELECT wait_for_job_to_run(:job_id_4, 2); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT id, table_name, status from _timescaledb_catalog.chunk +where hypertable_id = (select id from _timescaledb_catalog.hypertable + where table_name = 'conditions') +order by id; + id | table_name | status +----+------------------+-------- + 1 | _hyper_1_1_chunk | 1 + 2 | _hyper_1_2_chunk | 1 + 3 | _hyper_1_3_chunk | 1 +(3 rows) + +-- Drop the compression job +SELECT delete_job(:job_id_4); + delete_job +------------ + +(1 row) + +-- Decompress chunks before create the cagg +SELECT decompress_chunk(c) FROM show_chunks('conditions') c; + decompress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk +(3 rows) + +-- TEST Continuous Aggregate job +CREATE MATERIALIZED VIEW conditions_summary_daily +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT location, + time_bucket(INTERVAL '1 day', time) AS bucket, + AVG(temperature), + MAX(temperature), + MIN(temperature) +FROM conditions +GROUP BY location, bucket +WITH NO DATA; +-- Refresh Continous Aggregate by Job +SELECT add_job('custom_proc5', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_5 \gset +SELECT wait_for_job_to_run(:job_id_5, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT count(*) FROM 
conditions_summary_daily; + count +------- + 62 +(1 row) + +-- TESTs for alter_job_set_hypertable_id API +SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id_5, NULL); + alter_job_set_hypertable_id +----------------------------- + 1004 +(1 row) + +SELECT id, proc_name, hypertable_id +FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; + id | proc_name | hypertable_id +------+--------------+--------------- + 1004 | custom_proc5 | +(1 row) + +-- error case, try to associate with a PG relation +\set ON_ERROR_STOP 0 +SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id_5, 'custom_log'); +ERROR: relation "custom_log" is not a hypertable or continuous aggregate +\set ON_ERROR_STOP 1 +-- TEST associate the cagg with the job +SELECT _timescaledb_functions.alter_job_set_hypertable_id( :job_id_5, 'conditions_summary_daily'::regclass); + alter_job_set_hypertable_id +----------------------------- + 1004 +(1 row) + +SELECT id, proc_name, hypertable_id +FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; + id | proc_name | hypertable_id +------+--------------+--------------- + 1004 | custom_proc5 | 3 +(1 row) + +--verify that job is dropped when cagg is dropped +DROP MATERIALIZED VIEW conditions_summary_daily; +NOTICE: drop cascades to table _timescaledb_internal._hyper_3_10_chunk +SELECT id, proc_name, hypertable_id +FROM _timescaledb_config.bgw_job WHERE id = :job_id_5; + id | proc_name | hypertable_id +----+-----------+--------------- +(0 rows) + +-- Cleanup +DROP TABLE conditions; +DROP TABLE custom_log; +-- Stop Background Workers +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +SELECT _timescaledb_functions.restart_background_workers(); + restart_background_workers +---------------------------- + t +(1 row) + +\set ON_ERROR_STOP 0 +-- add test for custom jobs with custom check functions +-- create the functions/procedures to be used as checking functions +CREATE OR REPLACE PROCEDURE test_config_check_proc(config jsonb) +LANGUAGE PLPGSQL +AS $$ +DECLARE + drop_after interval; +BEGIN + SELECT jsonb_object_field_text (config, 'drop_after')::interval INTO STRICT drop_after; + IF drop_after IS NULL THEN + RAISE EXCEPTION 'Config must be not NULL and have drop_after'; + END IF ; +END +$$; +CREATE OR REPLACE FUNCTION test_config_check_func(config jsonb) RETURNS VOID +AS $$ +DECLARE + drop_after interval; +BEGIN + IF config IS NULL THEN + RETURN; + END IF; + SELECT jsonb_object_field_text (config, 'drop_after')::interval INTO STRICT drop_after; + IF drop_after IS NULL THEN + RAISE EXCEPTION 'Config can be NULL but must have drop_after if not'; + END IF ; +END +$$ LANGUAGE PLPGSQL; +-- step 2, create a procedure to run as a custom job +CREATE OR REPLACE PROCEDURE test_proc_with_check(job_id int, config jsonb) +LANGUAGE PLPGSQL +AS $$ +BEGIN + RAISE NOTICE 'Will only print this if config passes checks, my config is %', config; +END +$$; +-- step 3, add the job with the config check function passed as argument +-- test procedures, should get an unsupported error +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_proc'::regproc); +ERROR: unsupported function type +-- test functions +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func'::regproc); +ERROR: Config can be NULL but must have drop_after if not +select add_job('test_proc_with_check', '5 secs', config => NULL, check_config => 
'test_config_check_func'::regproc); + add_job +--------- + 1005 +(1 row) + +select add_job('test_proc_with_check', '5 secs', config => '{"drop_after": "chicken"}', check_config => 'test_config_check_func'::regproc); +ERROR: invalid input syntax for type interval: "chicken" +select add_job('test_proc_with_check', '5 secs', config => '{"drop_after": "2 weeks"}', check_config => 'test_config_check_func'::regproc) +as job_with_func_check_id \gset +--- test alter_job +select alter_job(:job_with_func_check_id, config => '{"drop_after":"chicken"}'); +ERROR: invalid input syntax for type interval: "chicken" +select config from alter_job(:job_with_func_check_id, config => '{"drop_after":"5 years"}'); + config +--------------------------- + {"drop_after": "5 years"} +(1 row) + +-- test that jobs with an incorrect check function signature will not be registered +-- these are all incorrect function signatures +CREATE OR REPLACE FUNCTION test_config_check_func_0args() RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'I take no arguments and will validate anything you give me!'; +END +$$ LANGUAGE PLPGSQL; +CREATE OR REPLACE FUNCTION test_config_check_func_2args(config jsonb, intarg int) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'I take two arguments (jsonb, int) and I should fail to run!'; +END +$$ LANGUAGE PLPGSQL; +CREATE OR REPLACE FUNCTION test_config_check_func_intarg(config int) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'I take one argument which is an integer and I should fail to run!'; +END +$$ LANGUAGE PLPGSQL; +-- -- this should fail, it has an incorrect check function +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_0args'::regproc); +ERROR: function or procedure public.test_config_check_func_0args(config jsonb) not found +-- -- so should this +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_2args'::regproc); +ERROR: function or procedure public.test_config_check_func_2args(config jsonb) not found +-- and this +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_intarg'::regproc); +ERROR: function or procedure public.test_config_check_func_intarg(config jsonb) not found +-- and this fails as it calls a nonexistent function +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_nonexistent_check_func'::regproc); +ERROR: function "test_nonexistent_check_func" does not exist at character 82 +-- when called with a valid check function and a NULL config no check should occur +CREATE OR REPLACE FUNCTION test_config_check_func(config jsonb) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'This message will get printed for both NULL and not NULL config'; +END +$$ LANGUAGE PLPGSQL; +SET client_min_messages = NOTICE; +-- check done for both NULL and non-NULL config +select add_job('test_proc_with_check', '5 secs', config => NULL, check_config => 'test_config_check_func'::regproc); +NOTICE: This message will get printed for both NULL and not NULL config + add_job +--------- + 1007 +(1 row) + +-- check done +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func'::regproc) as job_id \gset +NOTICE: This message will get printed for both NULL and not NULL config +-- check function not returning void +CREATE OR REPLACE FUNCTION test_config_check_func_returns_int(config jsonb) RETURNS INT +AS $$ +BEGIN + raise notice 'I print a message, and then I return least(1,2)'; + RETURN 
LEAST(1, 2); +END +$$ LANGUAGE PLPGSQL; +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_config_check_func_returns_int'::regproc, +initial_start => :'time_zero'::timestamptz) as job_id_int \gset +NOTICE: I print a message, and then I return least(1,2) +-- drop the registered check function, verify that alter_job will work and print a warning that +-- the check is being skipped due to the check function missing +ALTER FUNCTION test_config_check_func RENAME TO renamed_func; +select job_id, schedule_interval, config, check_config from alter_job(:job_id, schedule_interval => '1 hour'); +WARNING: function public.test_config_check_func(config jsonb) not found, skipping config validation for job 1008 + job_id | schedule_interval | config | check_config +--------+-------------------+--------+------------------------------- + 1008 | @ 1 hour | {} | public.test_config_check_func +(1 row) + +DROP FUNCTION test_config_check_func_returns_int; +select job_id, schedule_interval, config, check_config from alter_job(:job_id_int, config => '{"field":"value"}'); +WARNING: function public.test_config_check_func_returns_int(config jsonb) not found, skipping config validation for job 1009 + job_id | schedule_interval | config | check_config +--------+-------------------+--------------------+------------------------------------------- + 1009 | @ 5 secs | {"field": "value"} | public.test_config_check_func_returns_int +(1 row) + +-- rename the check function and then call alter_job to register the new name +select job_id, schedule_interval, config, check_config from alter_job(:job_id, check_config => 'renamed_func'::regproc); +NOTICE: This message will get printed for both NULL and not NULL config + job_id | schedule_interval | config | check_config +--------+-------------------+--------+--------------------- + 1008 | @ 1 hour | {} | public.renamed_func +(1 row) + +-- run alter again, should get a config check +select job_id, schedule_interval, config, check_config from alter_job(:job_id, config => '{}'); +NOTICE: This message will get printed for both NULL and not NULL config + job_id | schedule_interval | config | check_config +--------+-------------------+--------+--------------------- + 1008 | @ 1 hour | {} | public.renamed_func +(1 row) + +-- do not drop the current check function but register a new one +CREATE OR REPLACE FUNCTION substitute_check_func(config jsonb) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'This message is a substitute of the previously printed one'; +END +$$ LANGUAGE PLPGSQL; +-- register the new check +select job_id, schedule_interval, config, check_config from alter_job(:job_id, check_config => 'substitute_check_func'); +NOTICE: This message is a substitute of the previously printed one + job_id | schedule_interval | config | check_config +--------+-------------------+--------+------------------------------ + 1008 | @ 1 hour | {} | public.substitute_check_func +(1 row) + +select job_id, schedule_interval, config, check_config from alter_job(:job_id, config => '{}'); +NOTICE: This message is a substitute of the previously printed one + job_id | schedule_interval | config | check_config +--------+-------------------+--------+------------------------------ + 1008 | @ 1 hour | {} | public.substitute_check_func +(1 row) + +RESET client_min_messages; +-- test an oid that doesn't exist +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 17424217::regproc); +ERROR: function with OID 17424217 does not exist +\c :TEST_DBNAME 
:ROLE_SUPERUSER +-- test a function with insufficient privileges +create schema test_schema; +create role user_noexec with login; +grant usage on schema test_schema to user_noexec; +CREATE OR REPLACE FUNCTION test_schema.test_config_check_func_privileges(config jsonb) RETURNS VOID +AS $$ +BEGIN + RAISE NOTICE 'This message will only get printed if privileges suffice'; +END +$$ LANGUAGE PLPGSQL; +revoke execute on function test_schema.test_config_check_func_privileges from public; +-- verify the user doesn't have execute permissions on the function +select has_function_privilege('user_noexec', 'test_schema.test_config_check_func_privileges(jsonb)', 'execute'); + has_function_privilege +------------------------ + f +(1 row) + +\c :TEST_DBNAME user_noexec +-- user_noexec should not have exec permissions on this function +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'test_schema.test_config_check_func_privileges'::regproc); +ERROR: permission denied for function "test_config_check_func_privileges" +\c :TEST_DBNAME :ROLE_SUPERUSER +-- check that alter_job rejects a check function with invalid signature +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'renamed_func', +initial_start => :'time_zero'::timestamptz) as job_id_alter \gset +NOTICE: This message will get printed for both NULL and not NULL config +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter, check_config => 'test_config_check_func_0args'); +ERROR: function or procedure public.test_config_check_func_0args(config jsonb) not found +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter); +NOTICE: This message will get printed for both NULL and not NULL config + job_id | schedule_interval | config | check_config +--------+-------------------+--------+--------------------- + 1010 | @ 5 secs | {} | public.renamed_func +(1 row) + +-- test that we can unregister the check function +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter, check_config => 0); + job_id | schedule_interval | config | check_config +--------+-------------------+--------+-------------- + 1010 | @ 5 secs | {} | +(1 row) + +-- no message printed now +select job_id, schedule_interval, config, check_config from alter_job(:job_id_alter, config => '{}'); + job_id | schedule_interval | config | check_config +--------+-------------------+--------+-------------- + 1010 | @ 5 secs | {} | +(1 row) + +-- test the case where we have a background job that registers jobs with a check fn +CREATE OR REPLACE PROCEDURE add_scheduled_jobs_with_check(job_id int, config jsonb) LANGUAGE PLPGSQL AS +$$ +BEGIN + perform add_job('test_proc_with_check', schedule_interval => '10 secs', config => '{}', check_config => 'renamed_func'); +END +$$; +select add_job('add_scheduled_jobs_with_check', schedule_interval => '1 hour') as last_job_id \gset +-- wait for enough time +SELECT wait_for_job_to_run(:last_job_id, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +select total_runs, total_successes, last_run_status from timescaledb_information.job_stats where job_id = :last_job_id; + total_runs | total_successes | last_run_status +------------+-----------------+----------------- + 1 | 1 | Success +(1 row) + +-- test coverage for alter_job +-- registering an invalid oid +select alter_job(:job_id_alter, check_config => 123456789::regproc); +ERROR: function with OID 123456789 does not exist +-- registering a function with 
insufficient privileges +\c :TEST_DBNAME user_noexec +select * from add_job('test_proc_with_check', '5 secs', config => '{}') as job_id_owner \gset +select * from alter_job(:job_id_owner, check_config => 'test_schema.test_config_check_func_privileges'::regproc); +ERROR: permission denied for function "test_config_check_func_privileges" +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP SCHEMA test_schema CASCADE; +NOTICE: drop cascades to function test_schema.test_config_check_func_privileges(jsonb) +-- Delete all jobs with that owner before we can drop the user. +DELETE FROM _timescaledb_config.bgw_job WHERE owner = 'user_noexec'::regrole; +DROP ROLE user_noexec; +-- test with aggregate check proc +create function jsonb_add (j1 jsonb, j2 jsonb) returns jsonb +AS $$ +BEGIN + RETURN j1 || j2; +END +$$ LANGUAGE PLPGSQL; +CREATE AGGREGATE sum_jsb (jsonb) +( + sfunc = jsonb_add, + stype = jsonb, + initcond = '{}' +); +-- for test coverage, check unsupported aggregate type +select add_job('test_proc_with_check', '5 secs', config => '{}', check_config => 'sum_jsb'::regproc); +ERROR: unsupported function type +-- Cleanup jobs +TRUNCATE _timescaledb_config.bgw_job CASCADE; +NOTICE: truncate cascades to table "bgw_job_stat" +NOTICE: truncate cascades to table "bgw_policy_chunk_stats" +-- github issue 4610 +CREATE TABLE sensor_data +( + time timestamptz not null, + sensor_id integer not null, + cpu double precision null, + temperature double precision null +); +SELECT FROM create_hypertable('sensor_data','time'); +-- +(1 row) + +SELECT '2022-10-06 00:00:00+00' as start_date_sd \gset +INSERT INTO sensor_data + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date_sd'::timestamptz - INTERVAL '1 months', :'start_date_sd'::timestamptz - INTERVAL '1 week', INTERVAL '1 minute') AS g1(time), + generate_series(1, 50, 1 ) AS g2(sensor_id) + ORDER BY + time; +-- enable compression +ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_orderby = 'time DESC'); +-- create new chunks +INSERT INTO sensor_data + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date_sd'::timestamptz - INTERVAL '2 months', :'start_date_sd'::timestamptz - INTERVAL '2 week', INTERVAL '2 minute') AS g1(time), + generate_series(1, 30, 1 ) AS g2(sensor_id) + ORDER BY + time; +-- get the name of a new uncompressed chunk +SELECT chunk_name AS new_uncompressed_chunk_name + FROM timescaledb_information.chunks + WHERE hypertable_name = 'sensor_data' AND NOT is_compressed LIMIT 1 \gset +-- change compression status so that this chunk is skipped when policy is run +update _timescaledb_catalog.chunk set status=3 where table_name = :'new_uncompressed_chunk_name'; +-- add new compression policy job +SELECT add_compression_policy('sensor_data', INTERVAL '1' minute) AS compressjob_id \gset +-- set recompress to true +SELECT alter_job(id,config:=jsonb_set(config,'{recompress}', 'true')) FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + alter_job +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1014,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""recompress"": true, ""hypertable_id"": 4, ""compress_after"": ""@ 1 min""}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +-- 
verify that there are other uncompressed new chunks that need to be compressed +SELECT count(*) > 1 + FROM timescaledb_information.chunks + WHERE hypertable_name = 'sensor_data' AND NOT is_compressed; + ?column? +---------- + t +(1 row) + +-- disable notice/warning as the new_uncompressed_chunk_name +-- is dynamic and it will be printed in those messages. +SET client_min_messages TO ERROR; +CALL run_job(:compressjob_id); +SET client_min_messages TO NOTICE; +-- check compression status is not changed for the chunk whose status was manually updated +SELECT status FROM _timescaledb_catalog.chunk where table_name = :'new_uncompressed_chunk_name'; + status +-------- + 3 +(1 row) + +-- confirm all the other new chunks are now compressed despite +-- facing an error when trying to compress :'new_uncompressed_chunk_name' +SELECT count(*) = 0 + FROM timescaledb_information.chunks + WHERE hypertable_name = 'sensor_data' AND NOT is_compressed; + ?column? +---------- + t +(1 row) + +-- cleanup +SELECT _timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +DROP TABLE sensor_data; +SELECT _timescaledb_functions.restart_background_workers(); + restart_background_workers +---------------------------- + t +(1 row) + +-- Github issue #5537 +-- Proc that waits until the given job enters the expected state +CREATE OR REPLACE PROCEDURE wait_for_job_status(job_param_id INTEGER, expected_status TEXT, spins INTEGER=:TEST_SPINWAIT_ITERS) +LANGUAGE PLPGSQL AS $$ +DECLARE + jobstatus TEXT; +BEGIN + FOR i in 1..spins + LOOP + SELECT job_status FROM timescaledb_information.job_stats WHERE job_id = job_param_id INTO jobstatus; + IF jobstatus = expected_status THEN + RETURN; + END IF; + PERFORM pg_sleep(0.1); + ROLLBACK; + END LOOP; + RAISE EXCEPTION 'wait_for_job_status(%): timeout after % tries', job_param_id, spins; +END; +$$; +-- Proc that sleeps for 1m - to keep the test jobs in running state +CREATE OR REPLACE PROCEDURE proc_that_sleeps(job_id INT, config JSONB) +LANGUAGE PLPGSQL AS +$$ +BEGIN + PERFORM pg_sleep(60); +END +$$; +-- create new jobs and ensure that the second one gets scheduled +-- before the first one by adjusting the initial_start values +SELECT add_job('proc_that_sleeps', '1h', initial_start => now()::timestamptz + interval '2s') AS job_id_1 \gset +SELECT add_job('proc_that_sleeps', '1h', initial_start => now()::timestamptz - interval '2s') AS job_id_2 \gset +-- wait for the jobs to start running job_2 will start running first +CALL wait_for_job_status(:job_id_2, 'Running'); +CALL wait_for_job_status(:job_id_1, 'Running'); +-- add a new job and wait for it to start +SELECT add_job('proc_that_sleeps', '1h') AS job_id_3 \gset +CALL wait_for_job_status(:job_id_3, 'Running'); +-- verify that none of the jobs crashed +SELECT job_id, job_status, next_start, + total_runs, total_successes, total_failures + FROM timescaledb_information.job_stats + WHERE job_id IN (:job_id_1, :job_id_2, :job_id_3) + ORDER BY job_id; + job_id | job_status | next_start | total_runs | total_successes | total_failures +--------+------------+------------+------------+-----------------+---------------- + 1015 | Running | -infinity | 1 | 0 | 0 + 1016 | Running | -infinity | 1 | 0 | 0 + 1017 | Running | -infinity | 1 | 0 | 0 +(3 rows) + +SELECT job_id, err_message + FROM timescaledb_information.job_errors + WHERE job_id IN (:job_id_1, :job_id_2, :job_id_3); + job_id | err_message +--------+------------- +(0 rows) + +-- cleanup +SELECT 
_timescaledb_functions.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +CALL wait_for_job_status(:job_id_1, 'Scheduled'); +CALL wait_for_job_status(:job_id_2, 'Scheduled'); +CALL wait_for_job_status(:job_id_3, 'Scheduled'); +SELECT delete_job(:job_id_1); + delete_job +------------ + +(1 row) + +SELECT delete_job(:job_id_2); + delete_job +------------ + +(1 row) + +SELECT delete_job(:job_id_3); + delete_job +------------ + +(1 row) + diff --git a/tsl/test/expected/compression.out b/tsl/test/expected/compression.out index 7e641f3b329..a47825e9a0b 100644 --- a/tsl/test/expected/compression.out +++ b/tsl/test/expected/compression.out @@ -124,28 +124,30 @@ select compress_chunk( '_timescaledb_internal._hyper_1_1_chunk'); \x select * from _timescaledb_catalog.compression_chunk_size order by chunk_id; --[ RECORD 1 ]------------+------ -chunk_id | 1 -compressed_chunk_id | 6 -uncompressed_heap_size | 8192 -uncompressed_toast_size | 0 -uncompressed_index_size | 32768 -compressed_heap_size | 16384 -compressed_toast_size | 8192 -compressed_index_size | 16384 -numrows_pre_compression | 1 -numrows_post_compression | 1 --[ RECORD 2 ]------------+------ -chunk_id | 2 -compressed_chunk_id | 5 -uncompressed_heap_size | 8192 -uncompressed_toast_size | 0 -uncompressed_index_size | 32768 -compressed_heap_size | 16384 -compressed_toast_size | 8192 -compressed_index_size | 16384 -numrows_pre_compression | 1 -numrows_post_compression | 1 +-[ RECORD 1 ]--------------+------ +chunk_id | 1 +compressed_chunk_id | 6 +uncompressed_heap_size | 8192 +uncompressed_toast_size | 0 +uncompressed_index_size | 32768 +compressed_heap_size | 16384 +compressed_toast_size | 8192 +compressed_index_size | 16384 +numrows_pre_compression | 1 +numrows_post_compression | 1 +numrows_frozen_immediately | 1 +-[ RECORD 2 ]--------------+------ +chunk_id | 2 +compressed_chunk_id | 5 +uncompressed_heap_size | 8192 +uncompressed_toast_size | 0 +uncompressed_index_size | 32768 +compressed_heap_size | 16384 +compressed_toast_size | 8192 +compressed_index_size | 16384 +numrows_pre_compression | 1 +numrows_post_compression | 1 +numrows_frozen_immediately | 1 \x select ch1.id, ch1.schema_name, ch1.table_name , ch2.table_name as compress_table diff --git a/tsl/test/expected/compression_bgw-14.out b/tsl/test/expected/compression_bgw-14.out index 7e283259af6..6470ec0e451 100644 --- a/tsl/test/expected/compression_bgw-14.out +++ b/tsl/test/expected/compression_bgw-14.out @@ -72,7 +72,7 @@ from chunk_compression_stats('conditions') where compression_status like 'Compre _hyper_1_1_chunk | 32 kB | 40 kB (1 row) -SELECT * FROM _timescaledb_catalog.chunk ORDER BY id; +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk ORDER BY id; id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk ----+---------------+-----------------------+--------------------------+---------------------+---------+--------+----------- 1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | 4 | f | 1 | f @@ -274,7 +274,6 @@ ERROR: permission denied to start background process as role "nologin_role" \set ON_ERROR_STOP 1 DROP TABLE test_table_nologin; RESET ROLE; -REVOKE NOLOGIN_ROLE FROM :ROLE_DEFAULT_PERM_USER; \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER CREATE TABLE conditions( time TIMESTAMPTZ NOT NULL, diff --git a/tsl/test/expected/compression_bgw-15.out b/tsl/test/expected/compression_bgw-15.out index 
7e283259af6..6470ec0e451 100644 --- a/tsl/test/expected/compression_bgw-15.out +++ b/tsl/test/expected/compression_bgw-15.out @@ -72,7 +72,7 @@ from chunk_compression_stats('conditions') where compression_status like 'Compre _hyper_1_1_chunk | 32 kB | 40 kB (1 row) -SELECT * FROM _timescaledb_catalog.chunk ORDER BY id; +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk ORDER BY id; id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk ----+---------------+-----------------------+--------------------------+---------------------+---------+--------+----------- 1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | 4 | f | 1 | f @@ -274,7 +274,6 @@ ERROR: permission denied to start background process as role "nologin_role" \set ON_ERROR_STOP 1 DROP TABLE test_table_nologin; RESET ROLE; -REVOKE NOLOGIN_ROLE FROM :ROLE_DEFAULT_PERM_USER; \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER CREATE TABLE conditions( time TIMESTAMPTZ NOT NULL, diff --git a/tsl/test/expected/compression_bgw-16.out b/tsl/test/expected/compression_bgw-16.out new file mode 100644 index 00000000000..6470ec0e451 --- /dev/null +++ b/tsl/test/expected/compression_bgw-16.out @@ -0,0 +1,657 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE ROLE NOLOGIN_ROLE WITH nologin noinherit; +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO NOLOGIN_ROLE; +GRANT NOLOGIN_ROLE TO :ROLE_DEFAULT_PERM_USER WITH ADMIN OPTION; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE conditions ( + time TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + location2 char(10) NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select create_hypertable( 'conditions', 'time', chunk_time_interval=> '31days'::interval); + create_hypertable +------------------------- + (1,public,conditions,t) +(1 row) + +--TEST 1-- +--cannot set policy without enabling compression -- +\set ON_ERROR_STOP 0 +select add_compression_policy('conditions', '60d'::interval); +ERROR: compression not enabled on hypertable "conditions" +\set ON_ERROR_STOP 1 +-- TEST2 -- +--add a policy to compress chunks -- +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'time'); +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +select add_compression_policy('conditions', '60d'::interval) AS compressjob_id +\gset +select * from _timescaledb_config.bgw_job where id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------+------------------------+--------------------------+---------- + 1000 | Compression Policy [1000] | @ 12 hours | @ 0 | -1 | @ 1 hour | 
_timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days"} | _timescaledb_functions | policy_compression_check | +(1 row) + +select * from alter_job(:compressjob_id, schedule_interval=>'1s'); + job_id | schedule_interval | max_runtime | max_retries | retry_period | scheduled | config | next_start | check_config | fixed_schedule | initial_start | timezone +--------+-------------------+-------------+-------------+--------------+-----------+-----------------------------------------------------+------------+-------------------------------------------------+----------------+---------------+---------- + 1000 | @ 1 sec | @ 0 | -1 | @ 1 hour | t | {"hypertable_id": 1, "compress_after": "@ 60 days"} | -infinity | _timescaledb_functions.policy_compression_check | f | | +(1 row) + +--enable maxchunks to 1 so that only 1 chunk is compressed by the job +SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1')) + FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + (1000,"@ 1 sec","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 1, ""compress_after"": ""@ 60 days"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +select * from _timescaledb_config.bgw_job where id >= 1000 ORDER BY id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+--------------------------+---------- + 1000 | Compression Policy [1000] | @ 1 sec | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days", "maxchunks_to_compress": 1} | _timescaledb_functions | policy_compression_check | +(1 row) + +insert into conditions +select now()::timestamp, 'TOK', 'sony', 55, 75; +-- TEST3 -- +--only the old chunks will get compressed when policy is executed-- +CALL run_job(:compressjob_id); +select chunk_name, pg_size_pretty(before_compression_total_bytes) before_total, +pg_size_pretty( after_compression_total_bytes) after_total +from chunk_compression_stats('conditions') where compression_status like 'Compressed' order by chunk_name; + chunk_name | before_total | after_total +------------------+--------------+------------- + _hyper_1_1_chunk | 32 kB | 40 kB +(1 row) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk ORDER BY id; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+--------------------------+---------------------+---------+--------+----------- + 1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | 4 | f | 1 | f + 2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | | f | 0 | f + 3 | 1 | _timescaledb_internal 
| _hyper_1_3_chunk | | f | 0 | f + 4 | 2 | _timescaledb_internal | compress_hyper_2_4_chunk | | f | 0 | f +(4 rows) + +-- TEST 4 -- +--cannot set another policy +\set ON_ERROR_STOP 0 +select add_compression_policy('conditions', '60d'::interval, if_not_exists=>true); +NOTICE: compression policy already exists for hypertable "conditions", skipping + add_compression_policy +------------------------ + -1 +(1 row) + +select add_compression_policy('conditions', '60d'::interval); +ERROR: compression policy already exists for hypertable or continuous aggregate "conditions" +select add_compression_policy('conditions', '30d'::interval, if_not_exists=>true); +WARNING: compression policy already exists for hypertable "conditions" + add_compression_policy +------------------------ + -1 +(1 row) + +\set ON_ERROR_STOP 1 +--TEST 5 -- +-- drop the policy -- +select remove_compression_policy('conditions'); + remove_compression_policy +--------------------------- + t +(1 row) + +select count(*) from _timescaledb_config.bgw_job WHERE id>=1000; + count +------- + 0 +(1 row) + +--TEST 6 -- +-- try to execute the policy after it has been dropped -- +\set ON_ERROR_STOP 0 +CALL run_job(:compressjob_id); +ERROR: job 1000 not found +--errors with bad input for add/remove compression policy +create view dummyv1 as select * from conditions limit 1; +select add_compression_policy( 100 , compress_after=> '1 day'::interval); +ERROR: object with id "100" not found +select add_compression_policy( 'dummyv1', compress_after=> '1 day'::interval ); +ERROR: "dummyv1" is not a hypertable or a continuous aggregate +select remove_compression_policy( 100 ); +ERROR: relation is not a hypertable or continuous aggregate +\set ON_ERROR_STOP 1 +-- We're done with the table, so drop it. +DROP TABLE IF EXISTS conditions CASCADE; +NOTICE: drop cascades to table _timescaledb_internal.compress_hyper_2_4_chunk +NOTICE: drop cascades to view dummyv1 +--TEST 7 +--compression policy for smallint, integer or bigint based partition hypertable +--smallint test +CREATE TABLE test_table_smallint(time SMALLINT, val SMALLINT); +SELECT create_hypertable('test_table_smallint', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------- + (3,public,test_table_smallint,t) +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now_smallint() RETURNS SMALLINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::SMALLINT'; +SELECT set_integer_now_func('test_table_smallint', 'dummy_now_smallint'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_smallint SELECT generate_series(1,5), 10; +ALTER TABLE test_table_smallint SET (timescaledb.compress); +\set ON_ERROR_STOP 0 +select add_compression_policy( 'test_table_smallint', compress_after=> '1 day'::interval ); +ERROR: unsupported compress_after argument type, expected type : smallint +\set ON_ERROR_STOP 1 +SELECT add_compression_policy('test_table_smallint', 2::SMALLINT) AS compressjob_id \gset +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone 
+------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+---------- + 1001 | Compression Policy [1001] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 2} | _timescaledb_functions | policy_compression_check | +(1 row) + +--will compress all chunks that need compression +CALL run_job(:compressjob_id); +SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes +FROM chunk_compression_stats('test_table_smallint') +WHERE compression_status LIKE 'Compressed' +ORDER BY chunk_name; + chunk_name | before_compression_total_bytes | after_compression_total_bytes +------------------+--------------------------------+------------------------------- + _hyper_3_5_chunk | 24576 | 24576 + _hyper_3_6_chunk | 24576 | 24576 +(2 rows) + +--integer tests +CREATE TABLE test_table_integer(time INTEGER, val INTEGER); +SELECT create_hypertable('test_table_integer', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------------- + (5,public,test_table_integer,t) +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now_integer() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS 'SELECT 5::INTEGER'; +SELECT set_integer_now_func('test_table_integer', 'dummy_now_integer'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_integer SELECT generate_series(1,5), 10; +ALTER TABLE test_table_integer SET (timescaledb.compress); +SELECT add_compression_policy('test_table_integer', 2::INTEGER) AS compressjob_id \gset +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+---------- + 1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 5 | {"hypertable_id": 5, "compress_after": 2} | _timescaledb_functions | policy_compression_check | +(1 row) + +--will compress all chunks that need compression +CALL run_job(:compressjob_id); +SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes +FROM chunk_compression_stats('test_table_integer') +WHERE compression_status LIKE 'Compressed' +ORDER BY chunk_name; + chunk_name | before_compression_total_bytes | after_compression_total_bytes +-------------------+--------------------------------+------------------------------- + _hyper_5_12_chunk | 24576 | 24576 + _hyper_5_13_chunk | 24576 | 24576 +(2 rows) + +--bigint test +CREATE TABLE test_table_bigint(time BIGINT, val BIGINT); +SELECT create_hypertable('test_table_bigint', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable 
+-------------------------------- + (7,public,test_table_bigint,t) +(1 row) + +CREATE OR REPLACE FUNCTION dummy_now_bigint() RETURNS BIGINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::BIGINT'; +SELECT set_integer_now_func('test_table_bigint', 'dummy_now_bigint'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO test_table_bigint SELECT generate_series(1,5), 10; +ALTER TABLE test_table_bigint SET (timescaledb.compress); +SELECT add_compression_policy('test_table_bigint', 2::BIGINT) AS compressjob_id \gset +SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+---------- + 1003 | Compression Policy [1003] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 7 | {"hypertable_id": 7, "compress_after": 2} | _timescaledb_functions | policy_compression_check | +(1 row) + +--will compress all chunks that need compression +CALL run_job(:compressjob_id); +SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes +FROM chunk_compression_stats('test_table_bigint') +WHERE compression_status LIKE 'Compressed' +ORDER BY chunk_name; + chunk_name | before_compression_total_bytes | after_compression_total_bytes +-------------------+--------------------------------+------------------------------- + _hyper_7_19_chunk | 24576 | 24576 + _hyper_7_20_chunk | 24576 | 24576 +(2 rows) + +--TEST 8 +--hypertable owner lacks permission to start background worker +SET ROLE NOLOGIN_ROLE; +CREATE TABLE test_table_nologin(time bigint, val int); +SELECT create_hypertable('test_table_nologin', 'time', chunk_time_interval => 1); +NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------------- + (9,public,test_table_nologin,t) +(1 row) + +SELECT set_integer_now_func('test_table_nologin', 'dummy_now_bigint'); + set_integer_now_func +---------------------- + +(1 row) + +ALTER TABLE test_table_nologin set (timescaledb.compress); +\set ON_ERROR_STOP 0 +SELECT add_compression_policy('test_table_nologin', 2::int); +ERROR: permission denied to start background process as role "nologin_role" +\set ON_ERROR_STOP 1 +DROP TABLE test_table_nologin; +RESET ROLE; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE conditions( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temperature FLOAT +); +SELECT * FROM create_hypertable('conditions', 'time', + chunk_time_interval => '1 day'::interval); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 11 | public | conditions | t +(1 row) + +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '10 min') AS time; +CREATE MATERIALIZED VIEW conditions_summary +WITH (timescaledb.continuous) AS +SELECT device, + time_bucket(INTERVAL '1 hour', "time") AS day, + AVG(temperature) AS avg_temperature, + MAX(temperature) AS max_temperature, + 
MIN(temperature) AS min_temperature +FROM conditions +GROUP BY device, time_bucket(INTERVAL '1 hour', "time") WITH NO DATA; +CALL refresh_continuous_aggregate('conditions_summary', NULL, NULL); +ALTER TABLE conditions SET (timescaledb.compress); +SELECT COUNT(*) AS dropped_chunks_count + FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00'); + dropped_chunks_count +---------------------- + 14 +(1 row) + +-- We need to have some chunks that are marked as dropped, otherwise +-- we will not have a problem below. +SELECT COUNT(*) AS dropped_chunks_count + FROM _timescaledb_catalog.chunk + WHERE dropped = TRUE; + dropped_chunks_count +---------------------- + 14 +(1 row) + +SELECT count(*) FROM timescaledb_information.chunks +WHERE hypertable_name = 'conditions' and is_compressed = true; + count +------- + 0 +(1 row) + +SELECT add_compression_policy AS job_id + FROM add_compression_policy('conditions', INTERVAL '1 day') \gset +-- job compresses only 1 chunk at a time -- +SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1')) + FROM _timescaledb_config.bgw_job WHERE id = :job_id; + alter_job +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +SELECT alter_job(id,config:=jsonb_set(config,'{verbose_log}', 'true')) + FROM _timescaledb_config.bgw_job WHERE id = :job_id; + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""verbose_log"": true, ""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,) +(1 row) + +set client_min_messages TO LOG; +CALL run_job(:job_id); +LOG: statement: CALL run_job(1004); +LOG: job 1004 completed processing chunk _timescaledb_internal._hyper_11_40_chunk +set client_min_messages TO NOTICE; +LOG: statement: set client_min_messages TO NOTICE; +SELECT count(*) FROM timescaledb_information.chunks +WHERE hypertable_name = 'conditions' and is_compressed = true; + count +------- + 1 +(1 row) + +\i include/recompress_basic.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE OR REPLACE VIEW compressed_chunk_info_view AS +SELECT + h.schema_name AS hypertable_schema, + h.table_name AS hypertable_name, + c.schema_name as chunk_schema, + c.table_name as chunk_name, + c.status as chunk_status, + comp.schema_name as compressed_chunk_schema, + comp.table_name as compressed_chunk_name +FROM + _timescaledb_catalog.hypertable h JOIN + _timescaledb_catalog.chunk c ON h.id = c.hypertable_id + LEFT JOIN _timescaledb_catalog.chunk comp +ON comp.id = c.compressed_chunk_id +; +CREATE TABLE test2 (timec timestamptz NOT NULL, i integer , + b bigint, t text); +SELECT table_name from create_hypertable('test2', 'timec', chunk_time_interval=> INTERVAL '7 days'); + table_name +------------ + test2 +(1 row) + +INSERT INTO test2 SELECT q, 10, 11, 'hello' FROM generate_series( '2020-01-03 10:00:00+00', '2020-01-03 12:00:00+00' , '5 min'::interval) q; +ALTER TABLE test2 set (timescaledb.compress, +timescaledb.compress_segmentby = 'b', +timescaledb.compress_orderby = 'timec DESC'); +SELECT compress_chunk(c) +FROM show_chunks('test2') c; + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_14_62_chunk +(1 row) + +---insert into the middle of the range --- +INSERT INTO test2 values ( '2020-01-03 10:01:00+00', 20, 11, '2row'); +INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 11, '3row'); +INSERT INTO test2 values ( '2020-01-03 12:01:00+00', 20, 11, '4row'); +--- insert a new segment by --- +INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 12, '12row'); +SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*) +FROM test2 +GROUP BY time_bucket(INTERVAL '2 hour', timec), b +ORDER BY 1, 2; + time_bucket | b | count +------------------------------+----+------- + Fri Jan 03 02:00:00 2020 PST | 11 | 26 + Fri Jan 03 02:00:00 2020 PST | 12 | 1 + Fri Jan 03 04:00:00 2020 PST | 11 | 2 +(3 rows) + +--check status for chunk -- +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 9 | _hyper_14_62_chunk +(1 row) + +SELECT compressed_chunk_schema || '.' || compressed_chunk_name as "COMP_CHUNK_NAME", + chunk_schema || '.' || chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' \gset +SELECT count(*) from test2; + count +------- + 29 +(1 row) + +-- call recompress_chunk inside a transaction. This should fails since +-- it contains transaction-terminating commands. +\set ON_ERROR_STOP 0 +START TRANSACTION; +CALL recompress_chunk(:'CHUNK_NAME'::regclass); +ROLLBACK; +\set ON_ERROR_STOP 1 +CALL recompress_chunk(:'CHUNK_NAME'::regclass); +-- Demonstrate that no locks are held on the hypertable, chunk, or the +-- compressed chunk after recompress_chunk has executed. 
+SELECT pid, locktype, relation, relation::regclass, mode, granted +FROM pg_locks +WHERE relation::regclass::text IN (:'CHUNK_NAME', :'COMP_CHUNK_NAME', 'test2') +ORDER BY pid; + pid | locktype | relation | relation | mode | granted +-----+----------+----------+----------+------+--------- +(0 rows) + +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_14_62_chunk +(1 row) + +--- insert into a compressed chunk again + a new chunk-- +INSERT INTO test2 values ( '2020-01-03 11:01:03+00', 20, 11, '33row'), + ( '2020-01-03 11:01:06+00', 20, 11, '36row'), + ( '2020-01-03 11:02:00+00', 20, 12, '12row'), + ( '2020-04-03 00:02:00+00', 30, 13, '3013row'); +SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*) +FROM test2 +GROUP BY time_bucket(INTERVAL '2 hour', timec), b +ORDER BY 1, 2; + time_bucket | b | count +------------------------------+----+------- + Fri Jan 03 02:00:00 2020 PST | 11 | 28 + Fri Jan 03 02:00:00 2020 PST | 12 | 2 + Fri Jan 03 04:00:00 2020 PST | 11 | 2 + Thu Apr 02 17:00:00 2020 PDT | 13 | 1 +(4 rows) + +--chunk status should be unordered for the previously compressed chunk +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 9 | _hyper_14_62_chunk + 0 | _hyper_14_64_chunk +(2 rows) + +SELECT add_compression_policy AS job_id + FROM add_compression_policy('test2', '30d'::interval) \gset +CALL run_job(:job_id); +CALL run_job(:job_id); +-- status should be compressed --- +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'test2' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_14_62_chunk + 1 | _hyper_14_64_chunk +(2 rows) + +\set ON_ERROR_STOP 0 +-- call recompress_chunk when status is not unordered +CALL recompress_chunk(:'CHUNK_NAME'::regclass, true); +psql:include/recompress_basic.sql:110: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk" +-- This will succeed and compress the chunk for the test below. 
+CALL recompress_chunk(:'CHUNK_NAME'::regclass, false); +psql:include/recompress_basic.sql:113: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk" +--now decompress it , then try and recompress +SELECT decompress_chunk(:'CHUNK_NAME'::regclass); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_14_62_chunk +(1 row) + +CALL recompress_chunk(:'CHUNK_NAME'::regclass); +psql:include/recompress_basic.sql:117: ERROR: call compress_chunk instead of recompress_chunk +\set ON_ERROR_STOP 1 +-- test recompress policy +CREATE TABLE metrics(time timestamptz NOT NULL); +SELECT hypertable_id AS "HYPERTABLE_ID", schema_name, table_name, created FROM create_hypertable('metrics','time') \gset +ALTER TABLE metrics SET (timescaledb.compress); +-- create chunk with some data and compress +INSERT INTO metrics SELECT '2000-01-01' FROM generate_series(1,10); +-- create custom compression job without recompress boolean +SELECT add_job('_timescaledb_functions.policy_compression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "compress_after": "@ 7 days"}')::jsonb, initial_start => '2000-01-01 00:00:00+00'::timestamptz) AS "JOB_COMPRESS" \gset +-- first call should compress +CALL run_job(:JOB_COMPRESS); +-- 2nd call should do nothing +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- do an INSERT so recompress has something to do +INSERT INTO metrics SELECT '2000-01-01'; +---- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +-- should recompress +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- disable recompress in compress job +SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','false'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS; + alter_job +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": false, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",) +(1 row) + +-- nothing to do +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- do an INSERT so recompress has something to do +INSERT INTO metrics SELECT '2000-01-01'; +---- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +-- still nothing to do since we disabled recompress +CALL run_job(:JOB_COMPRESS); +---- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +-- reenable recompress in compress job +SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','true'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS; + alter_job 
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": true, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",) +(1 row) + +-- should recompress now +CALL run_job(:JOB_COMPRESS); +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +SELECT delete_job(:JOB_COMPRESS); + delete_job +------------ + +(1 row) + +SELECT add_job('_timescaledb_functions.policy_recompression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "recompress_after": "@ 7 days", "maxchunks_to_compress": 1}')::jsonb) AS "JOB_RECOMPRESS" \gset +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +---- nothing to do yet +CALL run_job(:JOB_RECOMPRESS); +psql:include/recompress_basic.sql:189: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy +---- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 1 +(1 row) + +-- create some work for recompress +INSERT INTO metrics SELECT '2000-01-01'; +-- status should be 3 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +CALL run_job(:JOB_RECOMPRESS); +-- status should be 1 +SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics'; + chunk_status +-------------- + 9 +(1 row) + +SELECT delete_job(:JOB_RECOMPRESS); + delete_job +------------ + +(1 row) + +-- Teardown test +\c :TEST_DBNAME :ROLE_SUPERUSER +REVOKE CREATE ON SCHEMA public FROM NOLOGIN_ROLE; +DROP ROLE NOLOGIN_ROLE; diff --git a/tsl/test/expected/telemetry_stats-13.out b/tsl/test/expected/telemetry_stats-13.out new file mode 100644 index 00000000000..13d4ca32ce8 --- /dev/null +++ b/tsl/test/expected/telemetry_stats-13.out @@ -0,0 +1,748 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +--telemetry tests that require a community license +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- function call info size is too variable for this test, so disable it +SET timescaledb.telemetry_level='no_functions'; +SELECT setseed(1); + setseed +--------- + +(1 row) + +-- Create a materialized view from the telemetry report so that we +-- don't regenerate telemetry for every query. Filter heap_size for +-- materialized views since PG14 reports a different heap size for +-- them compared to earlier PG versions. 
+CREATE MATERIALIZED VIEW telemetry_report AS +SELECT (r #- '{relations,materialized_views,heap_size}') AS r +FROM get_telemetry_report() r; +CREATE VIEW relations AS +SELECT r -> 'relations' AS rels +FROM telemetry_report; +SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, + rels -> 'hypertables' -> 'num_relations' AS num_hypertables +FROM relations; + num_continuous_aggs | num_hypertables +---------------------+----------------- + 0 | 0 +(1 row) + +-- check telemetry picks up flagged content from metadata +SELECT r -> 'db_metadata' AS db_metadata +FROM telemetry_report; + db_metadata +------------- + {} +(1 row) + +-- check timescaledb_telemetry.cloud +SELECT r -> 'instance_metadata' AS instance_metadata +FROM telemetry_report r; + instance_metadata +------------------- + {"cloud": "ci"} +(1 row) + +CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); +CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); +CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); +CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); +CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE hyper (LIKE normal); +SELECT table_name FROM create_hypertable('hyper', 'time'); + table_name +------------ + hyper +(1 row) + +CREATE MATERIALIZED VIEW contagg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg" is already up-to-date +CREATE MATERIALIZED VIEW contagg_old +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg_old" is already up-to-date +-- Create another view (already have the "relations" view) +CREATE VIEW devices AS +SELECT DISTINCT ON (device) device +FROM hyper; +-- Show relations with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 0, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 0 + + }, + + "hypertables": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 8192, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, 
+ + "partitioned_tables": { + + "heap_size": 0, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "continuous_aggregates": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Insert data +INSERT INTO normal +SELECT t, ceil(random() * 10)::int, random() * 30 +FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; +INSERT INTO hyper +SELECT * FROM normal; +INSERT INTO part +SELECT * FROM normal; +CALL refresh_continuous_aggregate('contagg', NULL, NULL); +CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); +-- ANALYZE to get updated reltuples stats +ANALYZE normal, hyper, part; +SELECT count(c) FROM show_chunks('hyper') c; + count +------- + 9 +(1 row) + +SELECT count(c) FROM show_chunks('contagg') c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks('contagg_old') c; + count +------- + 2 +(1 row) + +-- Update and show the telemetry report +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + 
"compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 155648, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 16384, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 229376, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Actual row count should be the same as reltuples stats for all tables +SELECT (SELECT count(*) FROM normal) num_inserted_rows, + (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, + (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, + (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; + num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples +-------------------+------------------+-----------------+---------------- + 697 | 697 | 697 | 697 +(1 row) + +-- Add compression +ALTER TABLE hyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('hyper') c ORDER BY c LIMIT 4; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + 
_timescaledb_internal._hyper_1_4_chunk +(4 rows) + +ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); +NOTICE: defaulting compress_segmentby to device +NOTICE: defaulting compress_orderby to hour +SELECT compress_chunk(c) +FROM show_chunks('contagg') c ORDER BY c LIMIT 1; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_10_chunk +(1 row) + +-- Turn of real-time aggregation +ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); +ANALYZE normal, hyper, part; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 32768, + + "compression": { + + "compressed_heap_size": 32768, + + "compressed_row_count": 4, + + "compressed_toast_size": 32768, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 32768, + + "uncompressed_row_count": 284, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 65536, + + "num_compressed_hypertables": 1, + + "compressed_row_count_frozen_immediately": 4 + + }, + + "indexes_size": 122880, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 413 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 180224, + + "toast_size": 24576, + + "compression": { + + "compressed_heap_size": 40960, + + "compressed_row_count": 10, + + "num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920, + + "compressed_row_count_frozen_immediately": 10+ + }, + + "indexes_size": 180224, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 1 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + 
"uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- check telemetry for fixed schedule jobs works +create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_fixed'; +end +$$; +create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_drifting'; +end +$$; +-- before adding the jobs +select get_telemetry_report()->'num_user_defined_actions_fixed'; + ?column? +---------- + 0 +(1 row) + +select get_telemetry_report()->'num_user_defined_actions'; + ?column? +---------- + 0 +(1 row) + +select add_job('job_test_fixed', '1 week'); + add_job +--------- + 1000 +(1 row) + +select add_job('job_test_drifting', '1 week', fixed_schedule => false); + add_job +--------- + 1001 +(1 row) + +-- add continuous aggregate refresh policy for contagg +select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +-- add retention policy, fixed +select add_retention_policy('hyper', interval '1 year', initial_start => now()); + add_retention_policy +---------------------- + 1004 +(1 row) + +-- add compression policy +select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); + add_compression_policy +------------------------ + 1005 +(1 row) + +select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; + uda_fixed | uda_drifting +-----------+-------------- + 1 | 1 +(1 row) + +select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; + contagg_fixed | contagg_drifting +---------------+------------------ + 1 | 1 +(1 row) + +select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; + compress_fixed | retention_fixed +----------------+----------------- + 1 | 1 +(1 row) + +DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; +TRUNCATE _timescaledb_internal.job_errors; +-- create some "errors" for testing +INSERT INTO +_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name) +VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), +(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), +(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'), +(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'), +(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 
min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'), +-- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions +(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'); +-- create some errors for them +INSERT INTO +_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) +values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), +(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), +(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), +(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'), +(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'), +(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'), +(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'); +-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs +SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); + jsonb_pretty +---------------------------------------------- + { + + "policy_retention": { + + "P0001": 1 + + }, + + "policy_compression": { + + "JF009": 1 + + }, + + "user_defined_action": { + + "ABCDE": 1, + + "P0001": 2 + + }, + + "policy_refresh_continuous_aggregate": {+ + "P0001": 2 + + } + + } +(1 row) + +-- for job statistics, insert some records into bgw_job_stats +INSERT INTO _timescaledb_internal.bgw_job_stat +values +(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 
00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); +SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); + jsonb_pretty +------------------------------------------------ + { + + "policy_retention": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_compression": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "user_defined_action": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_refresh_continuous_aggregate": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + } + + } +(1 row) + +-- create nested continuous aggregates - copied from cagg_on_cagg_common +CREATE TABLE conditions ( + time timestamptz NOT NULL, + temperature int +); +SELECT create_hypertable('conditions', 'time'); + create_hypertable +------------------------- + (6,public,conditions,t) +(1 row) + +CREATE MATERIALIZED VIEW conditions_summary_hourly_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 hour', "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_daily_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 day', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_hourly_1 +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_weekly_3 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 week', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_daily_2 +GROUP BY 1 +WITH NO DATA; +SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); + jsonb_pretty +-------------- + 2 +(1 row) + +DROP VIEW relations; +DROP MATERIALIZED VIEW telemetry_report; diff --git a/tsl/test/expected/telemetry_stats-14.out b/tsl/test/expected/telemetry_stats-14.out new file mode 100644 index 00000000000..dbebefc4231 --- /dev/null +++ b/tsl/test/expected/telemetry_stats-14.out @@ -0,0 +1,748 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +--telemetry tests that require a community license +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- function call info size is too variable for this test, so disable it +SET timescaledb.telemetry_level='no_functions'; +SELECT setseed(1); + setseed +--------- + +(1 row) + +-- Create a materialized view from the telemetry report so that we +-- don't regenerate telemetry for every query. 
Filter heap_size for +-- materialized views since PG14 reports a different heap size for +-- them compared to earlier PG versions. +CREATE MATERIALIZED VIEW telemetry_report AS +SELECT (r #- '{relations,materialized_views,heap_size}') AS r +FROM get_telemetry_report() r; +CREATE VIEW relations AS +SELECT r -> 'relations' AS rels +FROM telemetry_report; +SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, + rels -> 'hypertables' -> 'num_relations' AS num_hypertables +FROM relations; + num_continuous_aggs | num_hypertables +---------------------+----------------- + 0 | 0 +(1 row) + +-- check telemetry picks up flagged content from metadata +SELECT r -> 'db_metadata' AS db_metadata +FROM telemetry_report; + db_metadata +------------- + {} +(1 row) + +-- check timescaledb_telemetry.cloud +SELECT r -> 'instance_metadata' AS instance_metadata +FROM telemetry_report r; + instance_metadata +------------------- + {"cloud": "ci"} +(1 row) + +CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); +CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); +CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); +CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); +CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE hyper (LIKE normal); +SELECT table_name FROM create_hypertable('hyper', 'time'); + table_name +------------ + hyper +(1 row) + +CREATE MATERIALIZED VIEW contagg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg" is already up-to-date +CREATE MATERIALIZED VIEW contagg_old +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg_old" is already up-to-date +-- Create another view (already have the "relations" view) +CREATE VIEW devices AS +SELECT DISTINCT ON (device) device +FROM hyper; +-- Show relations with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 0, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 0 + + }, + + "hypertables": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 8192, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + 
}, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 0, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "continuous_aggregates": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Insert data +INSERT INTO normal +SELECT t, ceil(random() * 10)::int, random() * 30 +FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; +INSERT INTO hyper +SELECT * FROM normal; +INSERT INTO part +SELECT * FROM normal; +CALL refresh_continuous_aggregate('contagg', NULL, NULL); +CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); +-- ANALYZE to get updated reltuples stats +ANALYZE normal, hyper, part; +SELECT count(c) FROM show_chunks('hyper') c; + count +------- + 9 +(1 row) + +SELECT count(c) FROM show_chunks('contagg') c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks('contagg_old') c; + count +------- + 2 +(1 row) + +-- Update and show the telemetry report +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + 
"compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 155648, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 16384, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 229376, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Actual row count should be the same as reltuples stats for all tables +SELECT (SELECT count(*) FROM normal) num_inserted_rows, + (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, + (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, + (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; + num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples +-------------------+------------------+-----------------+---------------- + 697 | 697 | 697 | 697 +(1 row) + +-- Add compression +ALTER TABLE hyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('hyper') c ORDER BY c LIMIT 4; + compress_chunk +---------------------------------------- + 
_timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + _timescaledb_internal._hyper_1_4_chunk +(4 rows) + +ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); +NOTICE: defaulting compress_segmentby to device +NOTICE: defaulting compress_orderby to hour +SELECT compress_chunk(c) +FROM show_chunks('contagg') c ORDER BY c LIMIT 1; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_10_chunk +(1 row) + +-- Turn of real-time aggregation +ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); +ANALYZE normal, hyper, part; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 106496, + + "toast_size": 32768, + + "compression": { + + "compressed_heap_size": 65536, + + "compressed_row_count": 4, + + "compressed_toast_size": 32768, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 32768, + + "uncompressed_row_count": 284, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 65536, + + "num_compressed_hypertables": 1, + + "compressed_row_count_frozen_immediately": 4 + + }, + + "indexes_size": 122880, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 413 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 24576, + + "compression": { + + "compressed_heap_size": 49152, + + "compressed_row_count": 10, + + "num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920, + + "compressed_row_count_frozen_immediately": 10+ + }, + + "indexes_size": 180224, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 1 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + 
"uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- check telemetry for fixed schedule jobs works +create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_fixed'; +end +$$; +create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_drifting'; +end +$$; +-- before adding the jobs +select get_telemetry_report()->'num_user_defined_actions_fixed'; + ?column? +---------- + 0 +(1 row) + +select get_telemetry_report()->'num_user_defined_actions'; + ?column? +---------- + 0 +(1 row) + +select add_job('job_test_fixed', '1 week'); + add_job +--------- + 1000 +(1 row) + +select add_job('job_test_drifting', '1 week', fixed_schedule => false); + add_job +--------- + 1001 +(1 row) + +-- add continuous aggregate refresh policy for contagg +select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +-- add retention policy, fixed +select add_retention_policy('hyper', interval '1 year', initial_start => now()); + add_retention_policy +---------------------- + 1004 +(1 row) + +-- add compression policy +select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); + add_compression_policy +------------------------ + 1005 +(1 row) + +select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; + uda_fixed | uda_drifting +-----------+-------------- + 1 | 1 +(1 row) + +select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; + contagg_fixed | contagg_drifting +---------------+------------------ + 1 | 1 +(1 row) + +select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; + compress_fixed | retention_fixed +----------------+----------------- + 1 | 1 +(1 row) + +DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; +TRUNCATE _timescaledb_internal.job_errors; +-- create some "errors" for testing +INSERT INTO +_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name) +VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), +(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), +(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'), +(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', 
'_timescaledb_functions', 'policy_retention'), +(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'), +-- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions +(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'); +-- create some errors for them +INSERT INTO +_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) +values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), +(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), +(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), +(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'), +(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'), +(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'), +(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'); +-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs +SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); + jsonb_pretty +---------------------------------------------- + { + + "policy_retention": { + + "P0001": 1 + + }, + + "policy_compression": { + + "JF009": 1 + + }, + + "user_defined_action": { + + "ABCDE": 1, + + "P0001": 2 + + }, + + "policy_refresh_continuous_aggregate": {+ + "P0001": 2 + + } + + } +(1 row) + +-- for job statistics, insert some records into bgw_job_stats +INSERT INTO _timescaledb_internal.bgw_job_stat +values +(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, 
'-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); +SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); + jsonb_pretty +------------------------------------------------ + { + + "policy_retention": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_compression": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "user_defined_action": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_refresh_continuous_aggregate": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + } + + } +(1 row) + +-- create nested continuous aggregates - copied from cagg_on_cagg_common +CREATE TABLE conditions ( + time timestamptz NOT NULL, + temperature int +); +SELECT create_hypertable('conditions', 'time'); + create_hypertable +------------------------- + (6,public,conditions,t) +(1 row) + +CREATE MATERIALIZED VIEW conditions_summary_hourly_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 hour', "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_daily_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 day', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_hourly_1 +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_weekly_3 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 week', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_daily_2 +GROUP BY 1 +WITH NO DATA; +SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); + jsonb_pretty +-------------- + 2 +(1 row) + +DROP VIEW relations; +DROP MATERIALIZED VIEW telemetry_report; diff --git a/tsl/test/expected/telemetry_stats-15.out b/tsl/test/expected/telemetry_stats-15.out index b2f196b6f1c..dbebefc4231 100644 --- a/tsl/test/expected/telemetry_stats-15.out +++ b/tsl/test/expected/telemetry_stats-15.out @@ -87,119 +87,123 @@ FROM hyper; -- Show relations with no data REFRESH MATERIALIZED VIEW telemetry_report; SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 0, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 0 + - }, + - "hypertables": { + - "heap_size": 0, + - 
"toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 8192, + - "num_children": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 0, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "continuous_aggregates": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "num_compressed_caggs": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 0, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 0 + + }, + + "hypertables": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 8192, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, 
+ + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 0, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "continuous_aggregates": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + } (1 row) @@ -236,119 +240,123 @@ SELECT count(c) FROM show_chunks('contagg_old') c; -- Update and show the telemetry report REFRESH MATERIALIZED VIEW telemetry_report; SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 65536, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 697 + - }, + - "hypertables": { + - "heap_size": 73728, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 155648, + - "num_children": 9, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 98304, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "continuous_aggregates": { + - 
"heap_size": 188416, + - "toast_size": 16384, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "num_compressed_caggs": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0 + - }, + - "indexes_size": 229376, + - "num_children": 4, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 155648, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 16384, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 229376, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 
0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + } (1 row) @@ -389,577 +397,123 @@ ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); ANALYZE normal, hyper, part; REFRESH MATERIALIZED VIEW telemetry_report; SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 65536, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 697 + - }, + - "hypertables": { + - "heap_size": 106496, + - "toast_size": 32768, + - "compression": { + - "compressed_heap_size": 65536, + - "compressed_row_count": 4, + - "compressed_toast_size": 32768, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 32768, + - "uncompressed_row_count": 284, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 65536, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 122880, + - "num_children": 9, + - "num_relations": 1, + - "num_reltuples": 413 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 98304, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "continuous_aggregates": { + - "heap_size": 188416, + - "toast_size": 24576, + - "compression": { + - "compressed_heap_size": 49152, + - "compressed_row_count": 10, + - "num_compressed_caggs": 1, + - "compressed_toast_size": 8192, + - "num_compressed_chunks": 1, + - "uncompressed_heap_size": 49152, + - "uncompressed_row_count": 452, + - "compressed_indexes_size": 16384, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 81920 + - }, + - "indexes_size": 180224, + - "num_children": 4, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 1 + - }, + - "distributed_hypertables_data_node": { + - 
"heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- Add distributed hypertables -\set DN_DBNAME_1 :TEST_DBNAME _1 -\set DN_DBNAME_2 :TEST_DBNAME _2 --- Not an access node or data node -SELECT r -> 'num_data_nodes' AS num_data_nodes, - r -> 'distributed_member' AS distributed_member -FROM telemetry_report; - num_data_nodes | distributed_member -----------------+-------------------- - | "none" -(1 row) - --- Become an access node by adding a data node -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); - node_name | database | node_created | database_created | extension_created --------------+----------------------+--------------+------------------+------------------- - data_node_1 | db_telemetry_stats_1 | t | t | t -(1 row) - --- Telemetry should show one data node and "acces node" status -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT r -> 'num_data_nodes' AS num_data_nodes, - r -> 'distributed_member' AS distributed_member -FROM telemetry_report; - num_data_nodes | distributed_member -----------------+-------------------- - 1 | "access node" -(1 row) - --- See telemetry report from a data node -\ir include/remote_exec.sql --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. 
-CREATE SCHEMA IF NOT EXISTS test; -psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping -GRANT USAGE ON SCHEMA test TO PUBLIC; -CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) -RETURNS VOID -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' -LANGUAGE C; -CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) -RETURNS TABLE("table_record" CSTRING[]) -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' -LANGUAGE C; -SELECT test.remote_exec(NULL, $$ - SELECT t -> 'num_data_nodes' AS num_data_nodes, - t -> 'distributed_member' AS distributed_member - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT t -> 'num_data_nodes' AS num_data_nodes, - t -> 'distributed_member' AS distributed_member - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -num_data_nodes|distributed_member ---------------+------------------ - |"data node" -(1 row) - - - remote_exec -------------- - -(1 row) - -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); - node_name | database | node_created | database_created | extension_created --------------+----------------------+--------------+------------------+------------------- - data_node_2 | db_telemetry_stats_2 | t | t | t -(1 row) - -CREATE TABLE disthyper (LIKE normal); -SELECT create_distributed_hypertable('disthyper', 'time', 'device'); - create_distributed_hypertable -------------------------------- - (6,public,disthyper,t) -(1 row) - --- Show distributed hypertables stats with no data -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 1, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- No datanode-related stats on the access node -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn -FROM relations; - distributed_hypertables_dn ------------------------------------------ - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0,+ - "num_compressed_hypertables": 0+ - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - } -(1 row) - --- Insert data into the distributed hypertable -INSERT INTO disthyper -SELECT * FROM normal; --- Update telemetry stats and show output on access node and data --- nodes. Note that the access node doesn't store data so shows --- zero. It should have stats from ANALYZE, though, like --- num_reltuples. 
-ANALYZE disthyper; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 18, + - "num_relations": 1, + - "num_reltuples": 697, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- Show data node stats -SELECT test.remote_exec(NULL, $$ - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -distributed_hypertables_dn --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -{ - "heap_size": 73728, - "toast_size": 0, - "compression": { - "compressed_heap_size": 0, - "compressed_row_count": 0, - "compressed_toast_size": 0, - "num_compressed_chunks": 0, - "uncompressed_heap_size": 0, - "uncompressed_row_count": 0, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 0, - "num_compressed_hypertables": 0 - }, - "indexes_size": 311296, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 368 -} -(1 row) - - -NOTICE: [data_node_2]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_2]: -distributed_hypertables_dn --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -{ - "heap_size": 73728, - "toast_size": 0, - "compression": { - "compressed_heap_size": 0, - "compressed_row_count": 0, - "compressed_toast_size": 0, - "num_compressed_chunks": 0, - "uncompressed_heap_size": 0, - "uncompressed_row_count": 0, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 0, - "num_compressed_hypertables": 0 - }, - "indexes_size": 311296, - "num_children": 9, - "num_relations": 1, - 
"num_reltuples": 329 -} -(1 row) - - - remote_exec -------------- - -(1 row) - --- Add compression -ALTER TABLE disthyper SET (timescaledb.compress); -SELECT compress_chunk(c) -FROM show_chunks('disthyper') c ORDER BY c LIMIT 4; - compress_chunk ----------------------------------------------- - _timescaledb_internal._dist_hyper_6_19_chunk - _timescaledb_internal._dist_hyper_6_20_chunk - _timescaledb_internal._dist_hyper_6_21_chunk - _timescaledb_internal._dist_hyper_6_22_chunk -(4 rows) - -ANALYZE disthyper; --- Update telemetry stats and show updated compression stats -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 0, + - "num_children": 18, + - "num_relations": 1, + - "num_reltuples": 581, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- Show data node stats -SELECT test.remote_exec(NULL, $$ - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -distributed_hypertables_dn ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -{ - "heap_size": 90112, - "toast_size": 16384, - "compression": { - "compressed_heap_size": 32768, - "compressed_row_count": 2, - "compressed_toast_size": 16384, - "num_compressed_chunks": 2, - "uncompressed_heap_size": 16384, - "uncompressed_row_count": 56, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 65536, - "num_compressed_hypertables": 1 - }, - "indexes_size": 278528, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 312 -} -(1 row) - - -NOTICE: [data_node_2]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_2]: -distributed_hypertables_dn 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -{ - "heap_size": 90112, - "toast_size": 16384, - "compression": { - "compressed_heap_size": 32768, - "compressed_row_count": 2, - "compressed_toast_size": 16384, - "num_compressed_chunks": 2, - "uncompressed_heap_size": 16384, - "uncompressed_row_count": 60, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 65536, - "num_compressed_hypertables": 1 - }, - "indexes_size": 278528, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 269 -} -(1 row) - - - remote_exec -------------- - -(1 row) - --- Create a replicated distributed hypertable and show replication stats -CREATE TABLE disthyper_repl (LIKE normal); -SELECT create_distributed_hypertable('disthyper_repl', 'time', 'device', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (7,public,disthyper_repl,t) -(1 row) - -INSERT INTO disthyper_repl -SELECT * FROM normal; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 0, + - "num_children": 36, + - "num_relations": 2, + - "num_reltuples": 581, + - "num_replica_chunks": 18, + - "num_replicated_distributed_hypertables": 1+ - } -(1 row) - --- Create a continuous aggregate on the distributed hypertable -CREATE MATERIALIZED VIEW distcontagg -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - disthyper -GROUP BY hour, device; -NOTICE: refreshing continuous aggregate "distcontagg" -CREATE MATERIALIZED VIEW distcontagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - disthyper -GROUP BY hour, device; -NOTICE: refreshing continuous aggregate "distcontagg_old" -VACUUM; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'continuous_aggregates') AS continuous_aggregates -FROM relations; - continuous_aggregates ------------------------------------------------- - { + - "heap_size": 434176, + - "toast_size": 40960, + - "compression": { + - "compressed_heap_size": 49152, + - "compressed_row_count": 10, + - "num_compressed_caggs": 1, + - "compressed_toast_size": 8192, + - "num_compressed_chunks": 1, + - "uncompressed_heap_size": 49152, + - "uncompressed_row_count": 452, + - "compressed_indexes_size": 
16384, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 81920 + - }, + - "indexes_size": 409600, + - "num_children": 8, + - "num_relations": 4, + - "num_reltuples": 2336, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 2, + - "num_caggs_on_distributed_hypertables": 2,+ - "num_caggs_using_real_time_aggregation": 3+ + relations +----------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 106496, + + "toast_size": 32768, + + "compression": { + + "compressed_heap_size": 65536, + + "compressed_row_count": 4, + + "compressed_toast_size": 32768, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 32768, + + "uncompressed_row_count": 284, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 65536, + + "num_compressed_hypertables": 1, + + "compressed_row_count_frozen_immediately": 4 + + }, + + "indexes_size": 122880, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 413 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 24576, + + "compression": { + + "compressed_heap_size": 49152, + + "compressed_row_count": 10, + + "num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920, + + "compressed_row_count_frozen_immediately": 10+ + }, + + "indexes_size": 180224, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 1 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + } (1 
row) @@ -1155,9 +709,9 @@ CREATE TABLE conditions ( temperature int ); SELECT create_hypertable('conditions', 'time'); - create_hypertable --------------------------- - (10,public,conditions,t) + create_hypertable +------------------------- + (6,public,conditions,t) (1 row) CREATE MATERIALIZED VIEW conditions_summary_hourly_1 @@ -1192,6 +746,3 @@ SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggrega DROP VIEW relations; DROP MATERIALIZED VIEW telemetry_report; -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -DROP DATABASE :DN_DBNAME_1 WITH (FORCE); -DROP DATABASE :DN_DBNAME_2 WITH (FORCE); diff --git a/tsl/test/expected/telemetry_stats-16.out b/tsl/test/expected/telemetry_stats-16.out new file mode 100644 index 00000000000..dbebefc4231 --- /dev/null +++ b/tsl/test/expected/telemetry_stats-16.out @@ -0,0 +1,748 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +--telemetry tests that require a community license +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- function call info size is too variable for this test, so disable it +SET timescaledb.telemetry_level='no_functions'; +SELECT setseed(1); + setseed +--------- + +(1 row) + +-- Create a materialized view from the telemetry report so that we +-- don't regenerate telemetry for every query. Filter heap_size for +-- materialized views since PG14 reports a different heap size for +-- them compared to earlier PG versions. +CREATE MATERIALIZED VIEW telemetry_report AS +SELECT (r #- '{relations,materialized_views,heap_size}') AS r +FROM get_telemetry_report() r; +CREATE VIEW relations AS +SELECT r -> 'relations' AS rels +FROM telemetry_report; +SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, + rels -> 'hypertables' -> 'num_relations' AS num_hypertables +FROM relations; + num_continuous_aggs | num_hypertables +---------------------+----------------- + 0 | 0 +(1 row) + +-- check telemetry picks up flagged content from metadata +SELECT r -> 'db_metadata' AS db_metadata +FROM telemetry_report; + db_metadata +------------- + {} +(1 row) + +-- check timescaledb_telemetry.cloud +SELECT r -> 'instance_metadata' AS instance_metadata +FROM telemetry_report r; + instance_metadata +------------------- + {"cloud": "ci"} +(1 row) + +CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); +CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); +CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); +CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); +CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE hyper (LIKE normal); +SELECT table_name FROM create_hypertable('hyper', 'time'); + table_name +------------ + hyper +(1 row) + +CREATE MATERIALIZED VIEW contagg +WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg" is already 
up-to-date +CREATE MATERIALIZED VIEW contagg_old +WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg_old" is already up-to-date +-- Create another view (already have the "relations" view) +CREATE VIEW devices AS +SELECT DISTINCT ON (device) device +FROM hyper; +-- Show relations with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 0, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 0 + + }, + + "hypertables": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 8192, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 0, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "continuous_aggregates": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 
0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Insert data +INSERT INTO normal +SELECT t, ceil(random() * 10)::int, random() * 30 +FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; +INSERT INTO hyper +SELECT * FROM normal; +INSERT INTO part +SELECT * FROM normal; +CALL refresh_continuous_aggregate('contagg', NULL, NULL); +CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); +-- ANALYZE to get updated reltuples stats +ANALYZE normal, hyper, part; +SELECT count(c) FROM show_chunks('hyper') c; + count +------- + 9 +(1 row) + +SELECT count(c) FROM show_chunks('contagg') c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks('contagg_old') c; + count +------- + 2 +(1 row) + +-- Update and show the telemetry report +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +---------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 155648, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 16384, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 229376, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + 
"toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- Actual row count should be the same as reltuples stats for all tables +SELECT (SELECT count(*) FROM normal) num_inserted_rows, + (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, + (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, + (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; + num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples +-------------------+------------------+-----------------+---------------- + 697 | 697 | 697 | 697 +(1 row) + +-- Add compression +ALTER TABLE hyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('hyper') c ORDER BY c LIMIT 4; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + _timescaledb_internal._hyper_1_4_chunk +(4 rows) + +ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); +NOTICE: defaulting compress_segmentby to device +NOTICE: defaulting compress_orderby to hour +SELECT compress_chunk(c) +FROM show_chunks('contagg') c ORDER BY c LIMIT 1; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_10_chunk +(1 row) + +-- Turn of real-time aggregation +ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); +ANALYZE normal, hyper, part; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 106496, + + "toast_size": 32768, + + "compression": { + + "compressed_heap_size": 65536, + + "compressed_row_count": 4, + + "compressed_toast_size": 32768, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 32768, + + "uncompressed_row_count": 284, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 65536, + + "num_compressed_hypertables": 1, + + "compressed_row_count_frozen_immediately": 4 + + }, + + "indexes_size": 122880, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 413 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 24576, + + "compression": { + + "compressed_heap_size": 49152, + + "compressed_row_count": 10, + + "num_compressed_caggs": 1, + + 
"compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920, + + "compressed_row_count_frozen_immediately": 10+ + }, + + "indexes_size": 180224, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 1 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0, + + "compressed_row_count_frozen_immediately": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0 + + } + + } +(1 row) + +-- check telemetry for fixed schedule jobs works +create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_fixed'; +end +$$; +create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_drifting'; +end +$$; +-- before adding the jobs +select get_telemetry_report()->'num_user_defined_actions_fixed'; + ?column? +---------- + 0 +(1 row) + +select get_telemetry_report()->'num_user_defined_actions'; + ?column? 
+---------- + 0 +(1 row) + +select add_job('job_test_fixed', '1 week'); + add_job +--------- + 1000 +(1 row) + +select add_job('job_test_drifting', '1 week', fixed_schedule => false); + add_job +--------- + 1001 +(1 row) + +-- add continuous aggregate refresh policy for contagg +select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +-- add retention policy, fixed +select add_retention_policy('hyper', interval '1 year', initial_start => now()); + add_retention_policy +---------------------- + 1004 +(1 row) + +-- add compression policy +select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); + add_compression_policy +------------------------ + 1005 +(1 row) + +select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; + uda_fixed | uda_drifting +-----------+-------------- + 1 | 1 +(1 row) + +select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; + contagg_fixed | contagg_drifting +---------------+------------------ + 1 | 1 +(1 row) + +select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; + compress_fixed | retention_fixed +----------------+----------------- + 1 | 1 +(1 row) + +DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; +TRUNCATE _timescaledb_internal.job_errors; +-- create some "errors" for testing +INSERT INTO +_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name) +VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), +(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), +(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'), +(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'), +(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'), +-- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions +(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'); +-- create some errors for them +INSERT INTO +_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) +values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), +(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), +(2001, 54321, 
'2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), +(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'), +(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'), +(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'), +(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'); +-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs +SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); + jsonb_pretty +---------------------------------------------- + { + + "policy_retention": { + + "P0001": 1 + + }, + + "policy_compression": { + + "JF009": 1 + + }, + + "user_defined_action": { + + "ABCDE": 1, + + "P0001": 2 + + }, + + "policy_refresh_continuous_aggregate": {+ + "P0001": 2 + + } + + } +(1 row) + +-- for job statistics, insert some records into bgw_job_stats +INSERT INTO _timescaledb_internal.bgw_job_stat +values +(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); +SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); + jsonb_pretty +------------------------------------------------ + { + + "policy_retention": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_compression": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "user_defined_action": { + + "total_runs": 2, + 
+ "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_refresh_continuous_aggregate": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + } + + } +(1 row) + +-- create nested continuous aggregates - copied from cagg_on_cagg_common +CREATE TABLE conditions ( + time timestamptz NOT NULL, + temperature int +); +SELECT create_hypertable('conditions', 'time'); + create_hypertable +------------------------- + (6,public,conditions,t) +(1 row) + +CREATE MATERIALIZED VIEW conditions_summary_hourly_1 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 hour', "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_daily_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 day', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_hourly_1 +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_weekly_3 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 week', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_daily_2 +GROUP BY 1 +WITH NO DATA; +SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); + jsonb_pretty +-------------- + 2 +(1 row) + +DROP VIEW relations; +DROP MATERIALIZED VIEW telemetry_report; diff --git a/tsl/test/isolation/specs/CMakeLists.txt b/tsl/test/isolation/specs/CMakeLists.txt index 8c5eb05d818..fb103b5001b 100644 --- a/tsl/test/isolation/specs/CMakeLists.txt +++ b/tsl/test/isolation/specs/CMakeLists.txt @@ -27,13 +27,9 @@ endif() if(CMAKE_BUILD_TYPE MATCHES Debug) list(APPEND TEST_TEMPLATES_MODULE ${TEST_TEMPLATES_MODULE_DEBUG}) - list( - APPEND - TEST_FILES - compression_chunk_race.spec - compression_freeze.spec - compression_merge_race.spec - decompression_chunk_and_parallel_query_wo_idx.spec) + list(APPEND TEST_FILES compression_chunk_race.spec compression_freeze.spec + compression_merge_race.spec + decompression_chunk_and_parallel_query_wo_idx.spec) if(PG_VERSION VERSION_GREATER_EQUAL "14.0") list(APPEND TEST_FILES freeze_chunk.spec compression_dml_iso.spec) endif() diff --git a/tsl/test/sql/.gitignore b/tsl/test/sql/.gitignore index 94699715054..ff836c95834 100644 --- a/tsl/test/sql/.gitignore +++ b/tsl/test/sql/.gitignore @@ -31,4 +31,5 @@ /remote-copy-*sv /transparent_decompression-*.sql /transparent_decompression_ordered_index-*.sql +/telemetry_stats-*.sql /merge_append_partially_compressed-*.sql diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index 3c50a3a1e36..18f7c623c1d 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -82,9 +82,6 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) recompress_chunk_segmentwise.sql transparent_decompression_join_index.sql feature_flags.sql) - if(USE_TELEMETRY) - list(APPEND TEST_FILES telemetry_stats.sql) - endif() if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") list( @@ -201,6 +198,9 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) continuous_aggs.sql.in continuous_aggs_deprecated.sql.in 
deparse.sql.in) + if(USE_TELEMETRY) + list(APPEND TEST_TEMPLATES telemetry_stats.sql.in) + endif() if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") list( APPEND diff --git a/tsl/test/sql/telemetry_stats.sql b/tsl/test/sql/telemetry_stats.sql.in similarity index 100% rename from tsl/test/sql/telemetry_stats.sql rename to tsl/test/sql/telemetry_stats.sql.in
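
Illustrative follow-up, not part of the patch itself: a minimal way to eyeball the new counter on a database that already has compressed chunks (for example the "hyper" table used in the telemetry test above). The identifiers come from this change; the queries are only a sketch, not an addition to the test suite.

    -- Aggregated counter as surfaced in the telemetry report under
    -- relations -> hypertables -> compression
    SELECT jsonb_pretty(get_telemetry_report()
           -> 'relations' -> 'hypertables' -> 'compression'
           -> 'compressed_row_count_frozen_immediately');

    -- Per-chunk values from the catalog column added by this patch
    SELECT chunk_id, numrows_post_compression, numrows_frozen_immediately
    FROM _timescaledb_catalog.compression_chunk_size
    ORDER BY chunk_id;

In the expected output above, the aggregated value equals compressed_row_count for the freshly compressed chunks (4 for the hypertable, 10 for the compressed continuous aggregate), i.e. every compressed row was frozen at compression time in that run.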