From 34a483dd8c1c60d64ccc11480dc52b13f2f3f4e2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabr=C3=ADzio=20de=20Royes=20Mello?=
Date: Tue, 10 Oct 2023 16:06:29 -0300
Subject: [PATCH] PG16: Fix multinode deparsing issues

PG16 moved the permission-checking information out of the range table
entries into a new data struct named `RTEPermissionInfo`, so we can no
longer use `rte->updatedCols` to build the target_attrs when deparsing.
Build the target_attrs from `get_rel_all_updated_cols` instead, and for
our multinode implementation also exclude GENERATED ALWAYS attributes,
so that we don't risk building parameters for such columns in
`stmt_params_create`. Postgres FDW has its own logic to skip generated
columns as well.

postgres/postgres@a61b1f74
---
 tsl/src/fdw/deparse.c                         |  11 +-
 tsl/src/fdw/modify_plan.c                     | 114 +--
 ...gw_dist_ht.out => cagg_bgw_dist_ht-13.out} |   0
 tsl/test/expected/cagg_bgw_dist_ht-14.out     | 744 ++++++++++++++++
 tsl/test/expected/cagg_bgw_dist_ht-15.out     | 744 ++++++++++++++++
 tsl/test/expected/cagg_bgw_dist_ht-16.out     | 744 ++++++++++++++++
 tsl/test/expected/cagg_ddl_dist_ht-13.out     | 125 +--
 tsl/test/expected/cagg_ddl_dist_ht-14.out     | 127 +--
 tsl/test/expected/cagg_ddl_dist_ht-15.out     | 127 +--
 tsl/test/expected/dist_hypertable-16.out      |   5 +-
 tsl/test/expected/dist_param-13.out           | 815 ++++++++++++++++++
 tsl/test/expected/dist_param-14.out           | 815 ++++++++++++++++++
 tsl/test/expected/dist_param-15.out           | 815 ++++++++++++++++++
 .../{dist_param.out => dist_param-16.out}     |   0
 tsl/test/sql/.gitignore                       |   2 +
 tsl/test/sql/CMakeLists.txt                   |   8 +-
 ...gw_dist_ht.sql => cagg_bgw_dist_ht.sql.in} |   0
 .../sql/{dist_param.sql => dist_param.sql.in} |   0
 18 files changed, 4952 insertions(+), 244 deletions(-)
 rename tsl/test/expected/{cagg_bgw_dist_ht.out => cagg_bgw_dist_ht-13.out} (100%)
 create mode 100644 tsl/test/expected/cagg_bgw_dist_ht-14.out
 create mode 100644 tsl/test/expected/cagg_bgw_dist_ht-15.out
 create mode 100644 tsl/test/expected/cagg_bgw_dist_ht-16.out
 create mode 100644 tsl/test/expected/dist_param-13.out
 create mode 100644 tsl/test/expected/dist_param-14.out
 create mode 100644 tsl/test/expected/dist_param-15.out
 rename tsl/test/expected/{dist_param.out => dist_param-16.out} (100%)
 rename tsl/test/sql/{cagg_bgw_dist_ht.sql => cagg_bgw_dist_ht.sql.in} (100%)
 rename tsl/test/sql/{dist_param.sql => dist_param.sql.in} (100%)

diff --git a/tsl/src/fdw/deparse.c b/tsl/src/fdw/deparse.c
index 4c0623093b8..679745c9b6c 100644
--- a/tsl/src/fdw/deparse.c
+++ b/tsl/src/fdw/deparse.c
@@ -1956,6 +1956,7 @@ void
 deparseUpdateSql(StringInfo buf, RangeTblEntry *rte, Index rtindex, Relation rel,
 				 List *targetAttrs, List *returningList, List **retrieved_attrs)
 {
+	TupleDesc tupdesc = RelationGetDescr(rel);
 	AttrNumber pindex;
 	bool first;
 	ListCell *lc;
@@ -1969,14 +1970,20 @@ deparseUpdateSql(StringInfo buf, RangeTblEntry *rte, Index rtindex, Relation rel
 	foreach (lc, targetAttrs)
 	{
 		int attnum = lfirst_int(lc);
+		Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1);
 
 		if (!first)
 			appendStringInfoString(buf, ", ");
 		first = false;
 
 		deparseColumnRef(buf, rtindex, attnum, rte, false);
-		appendStringInfo(buf, " = $%d", pindex);
-		pindex++;
+		if (attr->attgenerated)
+			appendStringInfoString(buf, " = DEFAULT");
+		else
+		{
+			appendStringInfo(buf, " = $%d", pindex);
+			pindex++;
+		}
 	}
 
 	appendStringInfoString(buf, " WHERE ctid = $1");
diff --git a/tsl/src/fdw/modify_plan.c b/tsl/src/fdw/modify_plan.c
index 5a9dc8fafd2..7d1e8ce5b21 100644
--- a/tsl/src/fdw/modify_plan.c
+++ b/tsl/src/fdw/modify_plan.c
@@ -8,6 +8,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 
 #include "deparse.h"
@@ -15,44 +17,6 @@
 #include "modify_plan.h"
 #include "ts_catalog/chunk_data_node.h"
 
-static List *
-get_insert_attrs(Relation rel)
-{
-	TupleDesc tupdesc = RelationGetDescr(rel);
-	List *attrs = NIL;
-	int i;
-
-	for (i = 0; i < tupdesc->natts; i++)
-	{
-		Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
-
-		if (!attr->attisdropped)
-			attrs = lappend_int(attrs, AttrOffsetGetAttrNumber(i));
-	}
-
-	return attrs;
-}
-
-static List *
-get_update_attrs(Bitmapset *updatedCols)
-{
-	List *attrs = NIL;
-	int col = -1;
-
-	while ((col = bms_next_member(updatedCols, col)) >= 0)
-	{
-		/* bit numbers are offset by FirstLowInvalidHeapAttributeNumber */
-		AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
-
-		if (attno <= InvalidAttrNumber) /* shouldn't happen */
-			elog(ERROR, "system-column update is not supported");
-
-		attrs = lappend_int(attrs, attno);
-	}
-
-	return attrs;
-}
-
 /* get a list of "live" DNs associated with this chunk */
 List *
 get_chunk_data_nodes(Oid relid)
@@ -144,6 +108,67 @@ fdw_plan_foreign_modify(PlannerInfo *root, ModifyTable *plan, Index result_relat
 	if (plan->returningLists)
 		returning_list = (List *) list_nth(plan->returningLists, subplan_index);
 
+	/*
+	 * Core code already has some lock on each rel being planned, so we can
+	 * use NoLock here.
+	 */
+	rel = table_open(rte->relid, NoLock);
+	TupleDesc tupdesc = RelationGetDescr(rel);
+
+	/*
+	 * In an INSERT, we transmit all columns that are defined in the foreign
+	 * table. In an UPDATE, if there are BEFORE ROW UPDATE triggers on the
+	 * foreign table, we transmit all columns like INSERT; else we transmit
+	 * only columns that were explicitly targets of the UPDATE, so as to avoid
+	 * unnecessary data transmission. (We can't do that for INSERT since we
+	 * would miss sending default values for columns not listed in the source
+	 * statement, and for UPDATE if there are BEFORE ROW UPDATE triggers since
+	 * those triggers might change values for non-target columns, in which
+	 * case we would miss sending changed values for those columns.)
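+	 *
+	 * Generated columns are likewise excluded both here and in the UPDATE
+	 * branch below: the data node computes them itself, and we must not
+	 * risk building parameters for them in stmt_params_create(). This
+	 * mirrors the generated-column handling in postgres_fdw.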
+	 */
+	if (operation == CMD_INSERT ||
+		(operation == CMD_UPDATE && rel->trigdesc && rel->trigdesc->trig_update_before_row))
+	{
+		int attnum;
+
+		for (attnum = 1; attnum <= tupdesc->natts; attnum++)
+		{
+			Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1);
+
+			if (!attr->attisdropped && attr->attgenerated == '\0')
+				target_attrs = lappend_int(target_attrs, attnum);
+		}
+	}
+	else if (operation == CMD_UPDATE)
+	{
+		RelOptInfo *rel = find_base_rel(root, result_relation);
+		Bitmapset *allUpdatedCols = NULL;
+		int col = -1;
+
+		allUpdatedCols = get_rel_all_updated_cols(root, rel);
+
+		while ((col = bms_next_member(allUpdatedCols, col)) >= 0)
+		{
+			/* bit numbers are offset by FirstLowInvalidHeapAttributeNumber */
+			AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
+
+			if (attno <= InvalidAttrNumber) /* shouldn't happen */
+				elog(ERROR, "system-column update is not supported");
+
+			/* Ignore generated columns */
+			Form_pg_attribute attr = TupleDescAttr(tupdesc, attno - 1);
+			if (attr->attgenerated)
+				continue;
+
+			target_attrs = lappend_int(target_attrs, attno);
+		}
+	}
+
 	/*
 	 * ON CONFLICT DO UPDATE and DO NOTHING case with inference specification
 	 * should have already been rejected in the optimizer, as presently there
@@ -158,12 +183,6 @@ fdw_plan_foreign_modify(PlannerInfo *root, ModifyTable *plan, Index result_relat
 				 errmsg("ON CONFLICT DO UPDATE not supported"
 						" on distributed hypertables")));
 
-	/*
-	 * Core code already has some lock on each rel being planned, so we can
-	 * use NoLock here.
-	 */
-	rel = table_open(rte->relid, NoLock);
-
 	/*
 	 * Construct the SQL command string
 	 *
@@ -176,7 +195,6 @@ fdw_plan_foreign_modify(PlannerInfo *root, ModifyTable *plan, Index result_relat
 	switch (operation)
 	{
 		case CMD_INSERT:
-			target_attrs = get_insert_attrs(rel);
 			deparseInsertSql(&sql,
 							 rte,
 							 result_relation,
@@ -189,17 +207,6 @@ fdw_plan_foreign_modify(PlannerInfo *root, ModifyTable *plan, Index result_relat
 			break;
 		case CMD_UPDATE:
 		{
-#if PG16_LT
-			Bitmapset *updatedCols = rte->updatedCols;
-#else
-			Bitmapset *updatedCols = NULL;
-			if (rte->perminfoindex > 0)
-			{
-				RTEPermissionInfo *perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte);
-				updatedCols = perminfo->updatedCols;
-			}
-#endif
-			target_attrs = get_update_attrs(updatedCols);
 			deparseUpdateSql(&sql,
 							 rte,
 							 result_relation,
diff --git a/tsl/test/expected/cagg_bgw_dist_ht.out b/tsl/test/expected/cagg_bgw_dist_ht-13.out
similarity index 100%
rename from tsl/test/expected/cagg_bgw_dist_ht.out
rename to tsl/test/expected/cagg_bgw_dist_ht-13.out
diff --git a/tsl/test/expected/cagg_bgw_dist_ht-14.out b/tsl/test/expected/cagg_bgw_dist_ht-14.out
new file mode 100644
index 00000000000..e7ca0d9ee6c
--- /dev/null
+++ b/tsl/test/expected/cagg_bgw_dist_ht-14.out
@@ -0,0 +1,744 @@
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
+------------------------------------
+-- Set up a distributed environment
+------------------------------------
+\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
+\set DATA_NODE_1 :TEST_DBNAME _1
+\set DATA_NODE_2 :TEST_DBNAME _2
+\set DATA_NODE_3 :TEST_DBNAME _3
+\ir include/remote_exec.sql
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
+CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +-----------------------+-----------------------+--------------+------------------+------------------- + db_cagg_bgw_dist_ht_1 | db_cagg_bgw_dist_ht_1 | t | t | t + db_cagg_bgw_dist_ht_2 | db_cagg_bgw_dist_ht_2 | t | t | t + db_cagg_bgw_dist_ht_3 | db_cagg_bgw_dist_ht_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +\set IS_DISTRIBUTED TRUE +\ir include/cagg_bgw_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- +-- Setup +-- +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_wait_for_scheduler_finish() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_create() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_destroy() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_reset_time(set_time BIGINT = 0, wait BOOLEAN = false) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +--test that this all works under the community license +ALTER DATABASE :TEST_DBNAME SET timescaledb.license_key='Community'; +--create a function with no permissions to execute +CREATE FUNCTION get_constant_no_perms() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS +$BODY$ + SELECT 10; +$BODY$; +REVOKE EXECUTE ON FUNCTION get_constant_no_perms() FROM PUBLIC; +\set WAIT_ON_JOB 0 +\set IMMEDIATELY_SET_UNTIL 1 +\set WAIT_FOR_OTHER_TO_ADVANCE 2 +CREATE OR REPLACE FUNCTION ts_bgw_params_mock_wait_returns_immediately(new_val INTEGER) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +-- Remove any default jobs, e.g., telemetry +DELETE FROM _timescaledb_config.bgw_job WHERE TRUE; +TRUNCATE _timescaledb_internal.bgw_job_stat; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE public.bgw_log( + msg_no INT, + mock_time BIGINT, + application_name TEXT, + msg TEXT +); +CREATE VIEW sorted_bgw_log AS + SELECT msg_no, + mock_time, + application_name, + regexp_replace(regexp_replace(msg, '(Wait until|started at|execution time) 
[0-9]+(\.[0-9]+)?', '\1 (RANDOM)', 'g'), 'background worker "[^"]+"','connection') AS msg + FROM bgw_log ORDER BY mock_time, application_name COLLATE "C", msg_no; +CREATE TABLE public.bgw_dsm_handle_store( + handle BIGINT +); +INSERT INTO public.bgw_dsm_handle_store VALUES (0); +SELECT ts_bgw_params_create(); + ts_bgw_params_create +---------------------- + +(1 row) + +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+---------- +(0 rows) + +SELECT * FROM timescaledb_information.job_stats; + hypertable_schema | hypertable_name | job_id | last_run_started_at | last_successful_finish | last_run_status | job_status | last_run_duration | next_start | total_runs | total_successes | total_failures +-------------------+-----------------+--------+---------------------+------------------------+-----------------+------------+-------------------+------------+------------+-----------------+---------------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | raw_hypertable_id | parent_mat_hypertable_id | user_view_schema | user_view_name | partial_view_schema | partial_view_name | bucket_width | direct_view_schema | direct_view_name | materialized_only | finalized +-------------------+-------------------+--------------------------+------------------+----------------+---------------------+-------------------+--------------+--------------------+------------------+-------------------+----------- +(0 rows) + +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +psql:include/cagg_bgw_common.sql:76: WARNING: no privileges were granted for "public" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE test_continuous_agg_table(time int, data int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_bgw_common.sql:80: NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +---------------------------------------- + (1,public,test_continuous_agg_table,t) +(1 row) + +\else +SELECT create_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10); +\endif +CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('test_continuous_agg_table', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', NULL, 4::integer, '12 
h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1000 +(1 row) + +SELECT id as raw_table_id FROM _timescaledb_catalog.hypertable WHERE table_name='test_continuous_agg_table' \gset +-- min distance from end should be 1 +SELECT mat_hypertable_id, user_view_schema, user_view_name, bucket_width +FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | user_view_schema | user_view_name | bucket_width +-------------------+------------------+--------------------------+-------------- + 2 | public | test_continuous_agg_view | 2 +(1 row) + +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset +SELECT id AS job_id FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id \gset +-- job was created +SELECT * FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 4, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +-- create 10 time buckets +INSERT INTO test_continuous_agg_table + SELECT i, i FROM + (SELECT generate_series(0, 10) as i) AS j; +-- no stats +SELECT job_id, next_start, last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + ORDER BY job_id; + job_id | next_start | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------+------------------+------------+-----------------+----------------+--------------- +(0 rows) + +-- no data in view +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- +(0 rows) + +-- run first time +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1000] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -2147483648, 6 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1000] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_2" + 2 | 0 | Refresh Continuous Aggregate Policy [1000] | inserted 3 row(s) into materialization table 
"_timescaledb_internal._materialized_hypertable_2" + 3 | 0 | Refresh Continuous Aggregate Policy [1000] | job 1000 (Refresh Continuous Aggregate Policy [1000]) exiting with success: execution time (RANDOM) ms +(6 rows) + +SELECT * FROM _timescaledb_config.bgw_job where id=:job_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 4, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +-- job ran once, successfully +SELECT job_id, next_start-last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1000 | @ 12 hours | t | 1 | 1 | 0 | 0 +(1 row) + +--clear log for next run of scheduler. 
+TRUNCATE public.bgw_log; +CREATE FUNCTION wait_for_timer_to_run(started_at INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + num_runs INTEGER; + message TEXT; +BEGIN + select format('[TESTING] Wait until %%, started at %s', started_at) into message; + FOR i in 1..spins + LOOP + SELECT COUNT(*) from bgw_log where msg LIKE message INTO num_runs; + if (num_runs > 0) THEN + RETURN true; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RETURN false; +END +$BODY$; +CREATE FUNCTION wait_for_job_to_run(job_param_id INTEGER, expected_runs INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + num_runs INTEGER; +BEGIN + FOR i in 1..spins + LOOP + SELECT total_successes FROM _timescaledb_internal.bgw_job_stat WHERE job_id=job_param_id INTO num_runs; + if (num_runs = expected_runs) THEN + RETURN true; + ELSEIF (num_runs > expected_runs) THEN + RAISE 'num_runs > expected'; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RETURN false; +END +$BODY$; +--make sure there is 1 job to start with +SELECT wait_for_job_to_run(:job_id, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_FOR_OTHER_TO_ADVANCE); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +--start the scheduler on 0 time +SELECT ts_bgw_params_reset_time(0, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run(extract(epoch from interval '24 hour')::int * 1000, 0); + ts_bgw_db_scheduler_test_run +------------------------------ + +(1 row) + +SELECT wait_for_timer_to_run(0); + wait_for_timer_to_run +----------------------- + t +(1 row) + +--advance to 12:00 so that it runs one more time; now we know the +--scheduler has loaded up the job with the old schedule_interval +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 2); + wait_for_job_to_run +--------------------- + t +(1 row) + +--advance clock 1us to make the scheduler realize the job is done +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+1, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +--alter the refresh interval and check if next_start is altered +SELECT alter_job(:job_id, schedule_interval => '1m', retry_period => '1m'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 min","@ 0",-1,"@ 1 min",t,"{""end_offset"": 4, ""start_offset"": null, ""mat_hypertable_id"": 2}","Sat Jan 01 04:01:00 2000 PST",_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT job_id, next_start - last_finish as until_next, total_runs +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id;; + job_id | until_next | total_runs +--------+------------+------------ + 1000 | @ 1 min | 2 +(1 row) + +--advance to 12:02, job should have run at 12:01 +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+(extract(epoch from interval '2 minute')::bigint * 1000000), true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT 
wait_for_job_to_run(:job_id, 3); + wait_for_job_to_run +--------------------- + t +(1 row) + +--next run in 1 minute +SELECT job_id, next_start-last_finish as until_next, total_runs +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id; + job_id | until_next | total_runs +--------+------------+------------ + 1000 | @ 1 min | 3 +(1 row) + +--change next run to be after 30s instead +SELECT (next_start - '30s'::interval) AS "NEW_NEXT_START" +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id \gset +SELECT alter_job(:job_id, next_start => :'NEW_NEXT_START'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 min","@ 0",-1,"@ 1 min",t,"{""end_offset"": 4, ""start_offset"": null, ""mat_hypertable_id"": 2}","Sat Jan 01 04:02:30 2000 PST",_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+(extract(epoch from interval '2 minute 30 seconds')::bigint * 1000000), true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 4); + wait_for_job_to_run +--------------------- + t +(1 row) + +--advance clock to quit scheduler +SELECT ts_bgw_params_reset_time(extract(epoch from interval '25 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +select ts_bgw_db_scheduler_test_wait_for_scheduler_finish(); + ts_bgw_db_scheduler_test_wait_for_scheduler_finish +---------------------------------------------------- + +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_ON_JOB); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +TRUNCATE public.bgw_log; +-- data before 8 +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 +(3 rows) + +-- invalidations test by running job multiple times +SELECT ts_bgw_params_reset_time(); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +DROP MATERIALIZED VIEW test_continuous_agg_view; +psql:include/cagg_bgw_common.sql:234: NOTICE: drop cascades to table _timescaledb_internal._hyper_2_3_chunk +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', 100::integer, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1001 +(1 row) + +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset +SELECT id AS job_id FROM _timescaledb_config.bgw_job WHERE hypertable_id=:mat_hypertable_id \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 
1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms +(6 rows) + +-- job ran once, successfully +SELECT job_id, last_finish - next_start as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+----------------+------------------+------------+-----------------+----------------+--------------- + 1001 | @ 12 hours ago | t | 1 | 1 | 0 | 0 +(1 row) + +-- should have refreshed everything we have so far +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 + 6 | 13 + 8 | 17 + 10 | 10 +(6 rows) + +-- invalidate some data +UPDATE test_continuous_agg_table +SET data = 11 WHERE time = 6; +--advance time by 12h so that job runs one more time +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms + 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker + 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | deleted 1 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 43200000000 
| Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms +(12 rows) + +SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1001 | @ 12 hours | t | 2 | 2 | 0 | 0 +(1 row) + +-- should have updated data for time=6 +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 + 6 | 18 + 8 | 17 + 10 | 10 +(6 rows) + +\x on +--check the information views -- +select view_name, view_owner, materialization_hypertable_schema, materialization_hypertable_name +from timescaledb_information.continuous_aggregates +where view_name::text like '%test_continuous_agg_view'; +-[ RECORD 1 ]---------------------+--------------------------- +view_name | test_continuous_agg_view +view_owner | default_perm_user +materialization_hypertable_schema | _timescaledb_internal +materialization_hypertable_name | _materialized_hypertable_3 + +select view_name, view_definition from timescaledb_information.continuous_aggregates +where view_name::text like '%test_continuous_agg_view'; +-[ RECORD 1 ]---+------------------------------------------------------------------------- +view_name | test_continuous_agg_view +view_definition | SELECT time_bucket(2, test_continuous_agg_table."time") AS time_bucket,+ + | sum(test_continuous_agg_table.data) AS value + + | FROM test_continuous_agg_table + + | GROUP BY (time_bucket(2, test_continuous_agg_table."time")); + +select job_status, last_run_duration +from timescaledb_information.job_stats ps, timescaledb_information.continuous_aggregates cagg +where cagg.view_name::text like '%test_continuous_agg_view' +and cagg.materialization_hypertable_name = ps.hypertable_name; +-[ RECORD 1 ]-----+---------- +job_status | Scheduled +last_run_duration | + +\x off +DROP MATERIALIZED VIEW test_continuous_agg_view; +psql:include/cagg_bgw_common.sql:294: NOTICE: drop cascades to table _timescaledb_internal._hyper_3_4_chunk +--create a view with a function that it has no permission to execute +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value, get_constant_no_perms() + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', 100::integer, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +SELECT id AS job_id FROM _timescaledb_config.bgw_job ORDER BY id desc limit 1 \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +-- job fails +SELECT job_id, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------------+------------+-----------------+----------------+--------------- + 1002 | f | 1 | 0 
| 1 | 0 +(1 row) + +DROP MATERIALIZED VIEW test_continuous_agg_view; +--advance clock to quit scheduler +SELECT ts_bgw_params_reset_time(extract(epoch from interval '25 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +select ts_bgw_db_scheduler_test_wait_for_scheduler_finish(); + ts_bgw_db_scheduler_test_wait_for_scheduler_finish +---------------------------------------------------- + +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_ON_JOB); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +--clear log for next run of the scheduler +TRUNCATE public.bgw_log; +SELECT ts_bgw_params_reset_time(); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +-- +-- Test creating continuous aggregate with a user that is the non-owner of the raw table +-- +CREATE TABLE test_continuous_agg_table_w_grant(time int, data int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_bgw_common.sql:330: NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------------------------ + (5,public,test_continuous_agg_table_w_grant,t) +(1 row) + +\else +SELECT create_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10); +\endif +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table_w_grant $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table_w_grant $$; +$DIST$); +\endif +SELECT set_integer_now_func('test_continuous_agg_table_w_grant', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +GRANT SELECT, TRIGGER ON test_continuous_agg_table_w_grant TO public; +INSERT INTO test_continuous_agg_table_w_grant + SELECT 1 , 1; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +-- make sure view can be created +CREATE MATERIALIZED VIEW test_continuous_agg_view_user_2 + WITH ( timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table_w_grant + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view_user_2', NULL, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +SELECT id AS job_id FROM _timescaledb_config.bgw_job ORDER BY id desc limit 1 \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT id, owner FROM _timescaledb_config.bgw_job WHERE id = :job_id ; + id | owner +------+--------------------- + 1003 | default_perm_user_2 +(1 row) + +SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1003 | @ 12 hours | t | 1 | 
1 | 0 | 0 +(1 row) + +--view is populated +SELECT * FROM test_continuous_agg_view_user_2 ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +--revoke permissions from the continuous agg view owner to select from raw table +--no further updates to cont agg should happen +REVOKE SELECT ON test_continuous_agg_table_w_grant FROM public; +--add new data to table +INSERT INTO test_continuous_agg_table_w_grant VALUES(5,1); +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +--advance time by 12h so that job tries to run one more time +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +--should show a failing execution because no longer has permissions (due to lack of permission on partial view owner's part) +SELECT job_id, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------------+------------+-----------------+----------------+--------------- + 1003 | f | 2 | 1 | 1 | 0 +(1 row) + +--view was NOT updated; but the old stuff is still there +SELECT * FROM test_continuous_agg_view_user_2; + time_bucket | value +-------------+------- + 0 | 1 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT * from sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1003] | refreshing continuous aggregate "test_continuous_agg_view_user_2" in window [ -2147483648, 2 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1003] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_6" + 2 | 0 | Refresh Continuous Aggregate Policy [1003] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_6" + 3 | 0 | Refresh Continuous Aggregate Policy [1003] | job 1003 (Refresh Continuous Aggregate Policy [1003]) exiting with success: execution time (RANDOM) ms + 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker + 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | job 1003 threw an error + 1 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | permission denied for table test_continuous_agg_table_w_grant +(10 rows) + +-- Count the number of continuous aggregate policies +SELECT count(*) FROM _timescaledb_config.bgw_job + WHERE proc_schema = '_timescaledb_functions' + AND proc_name = 'policy_refresh_continuous_aggregate'; + count +------- + 1 +(1 row) + +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1 WITH (FORCE); +DROP DATABASE :DATA_NODE_2 WITH (FORCE); +DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git 
a/tsl/test/expected/cagg_bgw_dist_ht-15.out b/tsl/test/expected/cagg_bgw_dist_ht-15.out new file mode 100644 index 00000000000..e7ca0d9ee6c --- /dev/null +++ b/tsl/test/expected/cagg_bgw_dist_ht-15.out @@ -0,0 +1,744 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +------------------------------------ +-- Set up a distributed environment +------------------------------------ +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +-----------------------+-----------------------+--------------+------------------+------------------- + db_cagg_bgw_dist_ht_1 | db_cagg_bgw_dist_ht_1 | t | t | t + db_cagg_bgw_dist_ht_2 | db_cagg_bgw_dist_ht_2 | t | t | t + db_cagg_bgw_dist_ht_3 | db_cagg_bgw_dist_ht_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +\set IS_DISTRIBUTED TRUE +\ir include/cagg_bgw_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+-- +-- Setup +-- +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_wait_for_scheduler_finish() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_create() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_destroy() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_reset_time(set_time BIGINT = 0, wait BOOLEAN = false) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +--test that this all works under the community license +ALTER DATABASE :TEST_DBNAME SET timescaledb.license_key='Community'; +--create a function with no permissions to execute +CREATE FUNCTION get_constant_no_perms() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS +$BODY$ + SELECT 10; +$BODY$; +REVOKE EXECUTE ON FUNCTION get_constant_no_perms() FROM PUBLIC; +\set WAIT_ON_JOB 0 +\set IMMEDIATELY_SET_UNTIL 1 +\set WAIT_FOR_OTHER_TO_ADVANCE 2 +CREATE OR REPLACE FUNCTION ts_bgw_params_mock_wait_returns_immediately(new_val INTEGER) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +-- Remove any default jobs, e.g., telemetry +DELETE FROM _timescaledb_config.bgw_job WHERE TRUE; +TRUNCATE _timescaledb_internal.bgw_job_stat; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE public.bgw_log( + msg_no INT, + mock_time BIGINT, + application_name TEXT, + msg TEXT +); +CREATE VIEW sorted_bgw_log AS + SELECT msg_no, + mock_time, + application_name, + regexp_replace(regexp_replace(msg, '(Wait until|started at|execution time) [0-9]+(\.[0-9]+)?', '\1 (RANDOM)', 'g'), 'background worker "[^"]+"','connection') AS msg + FROM bgw_log ORDER BY mock_time, application_name COLLATE "C", msg_no; +CREATE TABLE public.bgw_dsm_handle_store( + handle BIGINT +); +INSERT INTO public.bgw_dsm_handle_store VALUES (0); +SELECT ts_bgw_params_create(); + ts_bgw_params_create +---------------------- + +(1 row) + +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+---------- +(0 rows) + +SELECT * FROM timescaledb_information.job_stats; + hypertable_schema | hypertable_name | job_id | last_run_started_at | last_successful_finish | last_run_status | job_status | last_run_duration | next_start | total_runs | total_successes | total_failures +-------------------+-----------------+--------+---------------------+------------------------+-----------------+------------+-------------------+------------+------------+-----------------+---------------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | raw_hypertable_id | parent_mat_hypertable_id | user_view_schema | user_view_name | partial_view_schema | partial_view_name | bucket_width | direct_view_schema | direct_view_name | 
materialized_only | finalized +-------------------+-------------------+--------------------------+------------------+----------------+---------------------+-------------------+--------------+--------------------+------------------+-------------------+----------- +(0 rows) + +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +psql:include/cagg_bgw_common.sql:76: WARNING: no privileges were granted for "public" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE test_continuous_agg_table(time int, data int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_bgw_common.sql:80: NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +---------------------------------------- + (1,public,test_continuous_agg_table,t) +(1 row) + +\else +SELECT create_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10); +\endif +CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('test_continuous_agg_table', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', NULL, 4::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1000 +(1 row) + +SELECT id as raw_table_id FROM _timescaledb_catalog.hypertable WHERE table_name='test_continuous_agg_table' \gset +-- min distance from end should be 1 +SELECT mat_hypertable_id, user_view_schema, user_view_name, bucket_width +FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | user_view_schema | user_view_name | bucket_width +-------------------+------------------+--------------------------+-------------- + 2 | public | test_continuous_agg_view | 2 +(1 row) + +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset +SELECT id AS job_id FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id \gset +-- job was created +SELECT * FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | 
policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 4, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +-- create 10 time buckets +INSERT INTO test_continuous_agg_table + SELECT i, i FROM + (SELECT generate_series(0, 10) as i) AS j; +-- no stats +SELECT job_id, next_start, last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + ORDER BY job_id; + job_id | next_start | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------+------------------+------------+-----------------+----------------+--------------- +(0 rows) + +-- no data in view +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- +(0 rows) + +-- run first time +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1000] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -2147483648, 6 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1000] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_2" + 2 | 0 | Refresh Continuous Aggregate Policy [1000] | inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_2" + 3 | 0 | Refresh Continuous Aggregate Policy [1000] | job 1000 (Refresh Continuous Aggregate Policy [1000]) exiting with success: execution time (RANDOM) ms +(6 rows) + +SELECT * FROM _timescaledb_config.bgw_job where id=:job_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 4, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +-- job ran once, successfully +SELECT job_id, next_start-last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes 
+--------+------------+------------------+------------+-----------------+----------------+--------------- + 1000 | @ 12 hours | t | 1 | 1 | 0 | 0 +(1 row) + +--clear log for next run of scheduler. +TRUNCATE public.bgw_log; +CREATE FUNCTION wait_for_timer_to_run(started_at INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + num_runs INTEGER; + message TEXT; +BEGIN + select format('[TESTING] Wait until %%, started at %s', started_at) into message; + FOR i in 1..spins + LOOP + SELECT COUNT(*) from bgw_log where msg LIKE message INTO num_runs; + if (num_runs > 0) THEN + RETURN true; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RETURN false; +END +$BODY$; +CREATE FUNCTION wait_for_job_to_run(job_param_id INTEGER, expected_runs INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + num_runs INTEGER; +BEGIN + FOR i in 1..spins + LOOP + SELECT total_successes FROM _timescaledb_internal.bgw_job_stat WHERE job_id=job_param_id INTO num_runs; + if (num_runs = expected_runs) THEN + RETURN true; + ELSEIF (num_runs > expected_runs) THEN + RAISE 'num_runs > expected'; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RETURN false; +END +$BODY$; +--make sure there is 1 job to start with +SELECT wait_for_job_to_run(:job_id, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_FOR_OTHER_TO_ADVANCE); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +--start the scheduler on 0 time +SELECT ts_bgw_params_reset_time(0, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run(extract(epoch from interval '24 hour')::int * 1000, 0); + ts_bgw_db_scheduler_test_run +------------------------------ + +(1 row) + +SELECT wait_for_timer_to_run(0); + wait_for_timer_to_run +----------------------- + t +(1 row) + +--advance to 12:00 so that it runs one more time; now we know the +--scheduler has loaded up the job with the old schedule_interval +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 2); + wait_for_job_to_run +--------------------- + t +(1 row) + +--advance clock 1us to make the scheduler realize the job is done +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+1, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +--alter the refresh interval and check if next_start is altered +SELECT alter_job(:job_id, schedule_interval => '1m', retry_period => '1m'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 min","@ 0",-1,"@ 1 min",t,"{""end_offset"": 4, ""start_offset"": null, ""mat_hypertable_id"": 2}","Sat Jan 01 04:01:00 2000 PST",_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT job_id, next_start - last_finish as until_next, total_runs +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id;; + job_id | until_next | total_runs +--------+------------+------------ + 1000 | @ 1 min | 2 +(1 row) + +--advance to 12:02, job should have run at 12:01 +SELECT 
ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+(extract(epoch from interval '2 minute')::bigint * 1000000), true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 3); + wait_for_job_to_run +--------------------- + t +(1 row) + +--next run in 1 minute +SELECT job_id, next_start-last_finish as until_next, total_runs +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id; + job_id | until_next | total_runs +--------+------------+------------ + 1000 | @ 1 min | 3 +(1 row) + +--change next run to be after 30s instead +SELECT (next_start - '30s'::interval) AS "NEW_NEXT_START" +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id \gset +SELECT alter_job(:job_id, next_start => :'NEW_NEXT_START'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 min","@ 0",-1,"@ 1 min",t,"{""end_offset"": 4, ""start_offset"": null, ""mat_hypertable_id"": 2}","Sat Jan 01 04:02:30 2000 PST",_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+(extract(epoch from interval '2 minute 30 seconds')::bigint * 1000000), true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 4); + wait_for_job_to_run +--------------------- + t +(1 row) + +--advance clock to quit scheduler +SELECT ts_bgw_params_reset_time(extract(epoch from interval '25 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +select ts_bgw_db_scheduler_test_wait_for_scheduler_finish(); + ts_bgw_db_scheduler_test_wait_for_scheduler_finish +---------------------------------------------------- + +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_ON_JOB); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +TRUNCATE public.bgw_log; +-- data before 8 +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 +(3 rows) + +-- invalidations test by running job multiple times +SELECT ts_bgw_params_reset_time(); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +DROP MATERIALIZED VIEW test_continuous_agg_view; +psql:include/cagg_bgw_common.sql:234: NOTICE: drop cascades to table _timescaledb_internal._hyper_2_3_chunk +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', 100::integer, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1001 +(1 row) + +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset +SELECT id AS job_id FROM _timescaledb_config.bgw_job WHERE hypertable_id=:mat_hypertable_id \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg 
+--------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms +(6 rows) + +-- job ran once, successfully +SELECT job_id, last_finish - next_start as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+----------------+------------------+------------+-----------------+----------------+--------------- + 1001 | @ 12 hours ago | t | 1 | 1 | 0 | 0 +(1 row) + +-- should have refreshed everything we have so far +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 + 6 | 13 + 8 | 17 + 10 | 10 +(6 rows) + +-- invalidate some data +UPDATE test_continuous_agg_table +SET data = 11 WHERE time = 6; +--advance time by 12h so that job runs one more time +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms + 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker + 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | deleted 1 row(s) from 
materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms +(12 rows) + +SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1001 | @ 12 hours | t | 2 | 2 | 0 | 0 +(1 row) + +-- should have updated data for time=6 +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 + 6 | 18 + 8 | 17 + 10 | 10 +(6 rows) + +\x on +--check the information views -- +select view_name, view_owner, materialization_hypertable_schema, materialization_hypertable_name +from timescaledb_information.continuous_aggregates +where view_name::text like '%test_continuous_agg_view'; +-[ RECORD 1 ]---------------------+--------------------------- +view_name | test_continuous_agg_view +view_owner | default_perm_user +materialization_hypertable_schema | _timescaledb_internal +materialization_hypertable_name | _materialized_hypertable_3 + +select view_name, view_definition from timescaledb_information.continuous_aggregates +where view_name::text like '%test_continuous_agg_view'; +-[ RECORD 1 ]---+------------------------------------------------------------------------- +view_name | test_continuous_agg_view +view_definition | SELECT time_bucket(2, test_continuous_agg_table."time") AS time_bucket,+ + | sum(test_continuous_agg_table.data) AS value + + | FROM test_continuous_agg_table + + | GROUP BY (time_bucket(2, test_continuous_agg_table."time")); + +select job_status, last_run_duration +from timescaledb_information.job_stats ps, timescaledb_information.continuous_aggregates cagg +where cagg.view_name::text like '%test_continuous_agg_view' +and cagg.materialization_hypertable_name = ps.hypertable_name; +-[ RECORD 1 ]-----+---------- +job_status | Scheduled +last_run_duration | + +\x off +DROP MATERIALIZED VIEW test_continuous_agg_view; +psql:include/cagg_bgw_common.sql:294: NOTICE: drop cascades to table _timescaledb_internal._hyper_3_4_chunk +--create a view with a function that it has no permission to execute +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value, get_constant_no_perms() + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', 100::integer, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +SELECT id AS job_id FROM _timescaledb_config.bgw_job ORDER BY id desc limit 1 \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +-- job fails +SELECT job_id, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM 
_timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------------+------------+-----------------+----------------+--------------- + 1002 | f | 1 | 0 | 1 | 0 +(1 row) + +DROP MATERIALIZED VIEW test_continuous_agg_view; +--advance clock to quit scheduler +SELECT ts_bgw_params_reset_time(extract(epoch from interval '25 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +select ts_bgw_db_scheduler_test_wait_for_scheduler_finish(); + ts_bgw_db_scheduler_test_wait_for_scheduler_finish +---------------------------------------------------- + +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_ON_JOB); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +--clear log for next run of the scheduler +TRUNCATE public.bgw_log; +SELECT ts_bgw_params_reset_time(); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +-- +-- Test creating continuous aggregate with a user that is the non-owner of the raw table +-- +CREATE TABLE test_continuous_agg_table_w_grant(time int, data int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_bgw_common.sql:330: NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------------------------ + (5,public,test_continuous_agg_table_w_grant,t) +(1 row) + +\else +SELECT create_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10); +\endif +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table_w_grant $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table_w_grant $$; +$DIST$); +\endif +SELECT set_integer_now_func('test_continuous_agg_table_w_grant', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +GRANT SELECT, TRIGGER ON test_continuous_agg_table_w_grant TO public; +INSERT INTO test_continuous_agg_table_w_grant + SELECT 1 , 1; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +-- make sure view can be created +CREATE MATERIALIZED VIEW test_continuous_agg_view_user_2 + WITH ( timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table_w_grant + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view_user_2', NULL, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +SELECT id AS job_id FROM _timescaledb_config.bgw_job ORDER BY id desc limit 1 \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT id, owner FROM _timescaledb_config.bgw_job WHERE id = :job_id ; + id | owner +------+--------------------- + 1003 | default_perm_user_2 +(1 row) + +SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + 
where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1003 | @ 12 hours | t | 1 | 1 | 0 | 0 +(1 row) + +--view is populated +SELECT * FROM test_continuous_agg_view_user_2 ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +--revoke permissions from the continuous agg view owner to select from raw table +--no further updates to cont agg should happen +REVOKE SELECT ON test_continuous_agg_table_w_grant FROM public; +--add new data to table +INSERT INTO test_continuous_agg_table_w_grant VALUES(5,1); +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +--advance time by 12h so that job tries to run one more time +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +--should show a failing execution because no longer has permissions (due to lack of permission on partial view owner's part) +SELECT job_id, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------------+------------+-----------------+----------------+--------------- + 1003 | f | 2 | 1 | 1 | 0 +(1 row) + +--view was NOT updated; but the old stuff is still there +SELECT * FROM test_continuous_agg_view_user_2; + time_bucket | value +-------------+------- + 0 | 1 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT * from sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1003] | refreshing continuous aggregate "test_continuous_agg_view_user_2" in window [ -2147483648, 2 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1003] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_6" + 2 | 0 | Refresh Continuous Aggregate Policy [1003] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_6" + 3 | 0 | Refresh Continuous Aggregate Policy [1003] | job 1003 (Refresh Continuous Aggregate Policy [1003]) exiting with success: execution time (RANDOM) ms + 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker + 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | job 1003 threw an error + 1 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | permission denied for table test_continuous_agg_table_w_grant +(10 rows) + +-- Count the number of continuous aggregate policies +SELECT count(*) FROM _timescaledb_config.bgw_job + WHERE proc_schema = '_timescaledb_functions' + AND proc_name = 
'policy_refresh_continuous_aggregate'; + count +------- + 1 +(1 row) + +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1 WITH (FORCE); +DROP DATABASE :DATA_NODE_2 WITH (FORCE); +DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/cagg_bgw_dist_ht-16.out b/tsl/test/expected/cagg_bgw_dist_ht-16.out new file mode 100644 index 00000000000..1051d4f21ca --- /dev/null +++ b/tsl/test/expected/cagg_bgw_dist_ht-16.out @@ -0,0 +1,744 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +------------------------------------ +-- Set up a distributed environment +------------------------------------ +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT node_name, database, node_created, database_created, extension_created +FROM ( + SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* + FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) +) a; + node_name | database | node_created | database_created | extension_created +-----------------------+-----------------------+--------------+------------------+------------------- + db_cagg_bgw_dist_ht_1 | db_cagg_bgw_dist_ht_1 | t | t | t + db_cagg_bgw_dist_ht_2 | db_cagg_bgw_dist_ht_2 | t | t | t + db_cagg_bgw_dist_ht_3 | db_cagg_bgw_dist_ht_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +\set IS_DISTRIBUTED TRUE +\ir include/cagg_bgw_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
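Note on the per-version expected files: cagg_bgw_dist_ht-16.out duplicates the PG13-15 variants almost verbatim; in the excerpts here the only substantive divergence is the deparsed continuous aggregate definition, which PG16 prints without table-qualifying the columns (compare the view_definition block further down in this file with the same block in the preceding variant). Keeping one expected file per PostgreSQL major is the usual way to absorb such server-side deparsing differences. As a minimal illustration (not part of the test), the deparsed form on a given server can be inspected with:

    SELECT pg_get_viewdef('test_continuous_agg_view'::regclass, true);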
+-- +-- Setup +-- +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_wait_for_scheduler_finish() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_create() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_destroy() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_reset_time(set_time BIGINT = 0, wait BOOLEAN = false) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +--test that this all works under the community license +ALTER DATABASE :TEST_DBNAME SET timescaledb.license_key='Community'; +--create a function with no permissions to execute +CREATE FUNCTION get_constant_no_perms() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS +$BODY$ + SELECT 10; +$BODY$; +REVOKE EXECUTE ON FUNCTION get_constant_no_perms() FROM PUBLIC; +\set WAIT_ON_JOB 0 +\set IMMEDIATELY_SET_UNTIL 1 +\set WAIT_FOR_OTHER_TO_ADVANCE 2 +CREATE OR REPLACE FUNCTION ts_bgw_params_mock_wait_returns_immediately(new_val INTEGER) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +-- Remove any default jobs, e.g., telemetry +DELETE FROM _timescaledb_config.bgw_job WHERE TRUE; +TRUNCATE _timescaledb_internal.bgw_job_stat; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE public.bgw_log( + msg_no INT, + mock_time BIGINT, + application_name TEXT, + msg TEXT +); +CREATE VIEW sorted_bgw_log AS + SELECT msg_no, + mock_time, + application_name, + regexp_replace(regexp_replace(msg, '(Wait until|started at|execution time) [0-9]+(\.[0-9]+)?', '\1 (RANDOM)', 'g'), 'background worker "[^"]+"','connection') AS msg + FROM bgw_log ORDER BY mock_time, application_name COLLATE "C", msg_no; +CREATE TABLE public.bgw_dsm_handle_store( + handle BIGINT +); +INSERT INTO public.bgw_dsm_handle_store VALUES (0); +SELECT ts_bgw_params_create(); + ts_bgw_params_create +---------------------- + +(1 row) + +SELECT * FROM _timescaledb_config.bgw_job; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+---------- +(0 rows) + +SELECT * FROM timescaledb_information.job_stats; + hypertable_schema | hypertable_name | job_id | last_run_started_at | last_successful_finish | last_run_status | job_status | last_run_duration | next_start | total_runs | total_successes | total_failures +-------------------+-----------------+--------+---------------------+------------------------+-----------------+------------+-------------------+------------+------------+-----------------+---------------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | raw_hypertable_id | parent_mat_hypertable_id | user_view_schema | user_view_name | partial_view_schema | partial_view_name | bucket_width | direct_view_schema | direct_view_name | 
materialized_only | finalized +-------------------+-------------------+--------------------------+------------------+----------------+---------------------+-------------------+--------------+--------------------+------------------+-------------------+----------- +(0 rows) + +-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes +GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; +psql:include/cagg_bgw_common.sql:76: WARNING: no privileges were granted for "public" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE test_continuous_agg_table(time int, data int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_bgw_common.sql:80: NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +---------------------------------------- + (1,public,test_continuous_agg_table,t) +(1 row) + +\else +SELECT create_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10); +\endif +CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$; +$DIST$); +\endif +SELECT set_integer_now_func('test_continuous_agg_table', 'integer_now_test'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', NULL, 4::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1000 +(1 row) + +SELECT id as raw_table_id FROM _timescaledb_catalog.hypertable WHERE table_name='test_continuous_agg_table' \gset +-- min distance from end should be 1 +SELECT mat_hypertable_id, user_view_schema, user_view_name, bucket_width +FROM _timescaledb_catalog.continuous_agg; + mat_hypertable_id | user_view_schema | user_view_name | bucket_width +-------------------+------------------+--------------------------+-------------- + 2 | public | test_continuous_agg_view | 2 +(1 row) + +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset +SELECT id AS job_id FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id \gset +-- job was created +SELECT * FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | 
policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 4, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +-- create 10 time buckets +INSERT INTO test_continuous_agg_table + SELECT i, i FROM + (SELECT generate_series(0, 10) as i) AS j; +-- no stats +SELECT job_id, next_start, last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + ORDER BY job_id; + job_id | next_start | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------+------------------+------------+-----------------+----------------+--------------- +(0 rows) + +-- no data in view +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- +(0 rows) + +-- run first time +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1000] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -2147483648, 6 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1000] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_2" + 2 | 0 | Refresh Continuous Aggregate Policy [1000] | inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_2" + 3 | 0 | Refresh Continuous Aggregate Policy [1000] | job 1000 (Refresh Continuous Aggregate Policy [1000]) exiting with success: execution time (RANDOM) ms +(6 rows) + +SELECT * FROM _timescaledb_config.bgw_job where id=:job_id; + id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone +------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------------------+------------------------+-------------------------------------------+---------- + 1000 | Refresh Continuous Aggregate Policy [1000] | @ 12 hours | @ 0 | -1 | @ 12 hours | _timescaledb_functions | policy_refresh_continuous_aggregate | default_perm_user | t | f | | 2 | {"end_offset": 4, "start_offset": null, "mat_hypertable_id": 2} | _timescaledb_functions | policy_refresh_continuous_aggregate_check | +(1 row) + +-- job ran once, successfully +SELECT job_id, next_start-last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes 
+--------+------------+------------------+------------+-----------------+----------------+--------------- + 1000 | @ 12 hours | t | 1 | 1 | 0 | 0 +(1 row) + +--clear log for next run of scheduler. +TRUNCATE public.bgw_log; +CREATE FUNCTION wait_for_timer_to_run(started_at INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + num_runs INTEGER; + message TEXT; +BEGIN + select format('[TESTING] Wait until %%, started at %s', started_at) into message; + FOR i in 1..spins + LOOP + SELECT COUNT(*) from bgw_log where msg LIKE message INTO num_runs; + if (num_runs > 0) THEN + RETURN true; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RETURN false; +END +$BODY$; +CREATE FUNCTION wait_for_job_to_run(job_param_id INTEGER, expected_runs INTEGER, spins INTEGER=:TEST_SPINWAIT_ITERS) RETURNS BOOLEAN LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + num_runs INTEGER; +BEGIN + FOR i in 1..spins + LOOP + SELECT total_successes FROM _timescaledb_internal.bgw_job_stat WHERE job_id=job_param_id INTO num_runs; + if (num_runs = expected_runs) THEN + RETURN true; + ELSEIF (num_runs > expected_runs) THEN + RAISE 'num_runs > expected'; + ELSE + PERFORM pg_sleep(0.1); + END IF; + END LOOP; + RETURN false; +END +$BODY$; +--make sure there is 1 job to start with +SELECT wait_for_job_to_run(:job_id, 1); + wait_for_job_to_run +--------------------- + t +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_FOR_OTHER_TO_ADVANCE); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +--start the scheduler on 0 time +SELECT ts_bgw_params_reset_time(0, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run(extract(epoch from interval '24 hour')::int * 1000, 0); + ts_bgw_db_scheduler_test_run +------------------------------ + +(1 row) + +SELECT wait_for_timer_to_run(0); + wait_for_timer_to_run +----------------------- + t +(1 row) + +--advance to 12:00 so that it runs one more time; now we know the +--scheduler has loaded up the job with the old schedule_interval +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 2); + wait_for_job_to_run +--------------------- + t +(1 row) + +--advance clock 1us to make the scheduler realize the job is done +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+1, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +--alter the refresh interval and check if next_start is altered +SELECT alter_job(:job_id, schedule_interval => '1m', retry_period => '1m'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 min","@ 0",-1,"@ 1 min",t,"{""end_offset"": 4, ""start_offset"": null, ""mat_hypertable_id"": 2}","Sat Jan 01 04:01:00 2000 PST",_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT job_id, next_start - last_finish as until_next, total_runs +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id;; + job_id | until_next | total_runs +--------+------------+------------ + 1000 | @ 1 min | 2 +(1 row) + +--advance to 12:02, job should have run at 12:01 +SELECT 
ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+(extract(epoch from interval '2 minute')::bigint * 1000000), true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 3); + wait_for_job_to_run +--------------------- + t +(1 row) + +--next run in 1 minute +SELECT job_id, next_start-last_finish as until_next, total_runs +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id; + job_id | until_next | total_runs +--------+------------+------------ + 1000 | @ 1 min | 3 +(1 row) + +--change next run to be after 30s instead +SELECT (next_start - '30s'::interval) AS "NEW_NEXT_START" +FROM _timescaledb_internal.bgw_job_stat +WHERE job_id=:job_id \gset +SELECT alter_job(:job_id, next_start => :'NEW_NEXT_START'); + alter_job +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (1000,"@ 1 min","@ 0",-1,"@ 1 min",t,"{""end_offset"": 4, ""start_offset"": null, ""mat_hypertable_id"": 2}","Sat Jan 01 04:02:30 2000 PST",_timescaledb_functions.policy_refresh_continuous_aggregate_check,f,,) +(1 row) + +SELECT ts_bgw_params_reset_time((extract(epoch from interval '12 hour')::bigint * 1000000)+(extract(epoch from interval '2 minute 30 seconds')::bigint * 1000000), true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT wait_for_job_to_run(:job_id, 4); + wait_for_job_to_run +--------------------- + t +(1 row) + +--advance clock to quit scheduler +SELECT ts_bgw_params_reset_time(extract(epoch from interval '25 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +select ts_bgw_db_scheduler_test_wait_for_scheduler_finish(); + ts_bgw_db_scheduler_test_wait_for_scheduler_finish +---------------------------------------------------- + +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_ON_JOB); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +TRUNCATE public.bgw_log; +-- data before 8 +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 +(3 rows) + +-- invalidations test by running job multiple times +SELECT ts_bgw_params_reset_time(); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +DROP MATERIALIZED VIEW test_continuous_agg_view; +psql:include/cagg_bgw_common.sql:234: NOTICE: drop cascades to table _timescaledb_internal._hyper_2_3_chunk +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', 100::integer, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1001 +(1 row) + +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset +SELECT id AS job_id FROM _timescaledb_config.bgw_job WHERE hypertable_id=:mat_hypertable_id \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg 
+--------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms +(6 rows) + +-- job ran once, successfully +SELECT job_id, last_finish - next_start as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+----------------+------------------+------------+-----------------+----------------+--------------- + 1001 | @ 12 hours ago | t | 1 | 1 | 0 | 0 +(1 row) + +-- should have refreshed everything we have so far +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 + 6 | 13 + 8 | 17 + 10 | 10 +(6 rows) + +-- invalidate some data +UPDATE test_continuous_agg_table +SET data = 11 WHERE time = 6; +--advance time by 12h so that job runs one more time +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT * FROM sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms + 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker + 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] + 1 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | deleted 1 row(s) from 
materialization table "_timescaledb_internal._materialized_hypertable_3" + 2 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" + 3 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms +(12 rows) + +SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1001 | @ 12 hours | t | 2 | 2 | 0 | 0 +(1 row) + +-- should have updated data for time=6 +SELECT * FROM test_continuous_agg_view ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 + 2 | 5 + 4 | 9 + 6 | 18 + 8 | 17 + 10 | 10 +(6 rows) + +\x on +--check the information views -- +select view_name, view_owner, materialization_hypertable_schema, materialization_hypertable_name +from timescaledb_information.continuous_aggregates +where view_name::text like '%test_continuous_agg_view'; +-[ RECORD 1 ]---------------------+--------------------------- +view_name | test_continuous_agg_view +view_owner | default_perm_user +materialization_hypertable_schema | _timescaledb_internal +materialization_hypertable_name | _materialized_hypertable_3 + +select view_name, view_definition from timescaledb_information.continuous_aggregates +where view_name::text like '%test_continuous_agg_view'; +-[ RECORD 1 ]---+----------------------------------------------- +view_name | test_continuous_agg_view +view_definition | SELECT time_bucket(2, "time") AS time_bucket,+ + | sum(data) AS value + + | FROM test_continuous_agg_table + + | GROUP BY (time_bucket(2, "time")); + +select job_status, last_run_duration +from timescaledb_information.job_stats ps, timescaledb_information.continuous_aggregates cagg +where cagg.view_name::text like '%test_continuous_agg_view' +and cagg.materialization_hypertable_name = ps.hypertable_name; +-[ RECORD 1 ]-----+---------- +job_status | Scheduled +last_run_duration | + +\x off +DROP MATERIALIZED VIEW test_continuous_agg_view; +psql:include/cagg_bgw_common.sql:294: NOTICE: drop cascades to table _timescaledb_internal._hyper_3_4_chunk +--create a view with a function that it has no permission to execute +CREATE MATERIALIZED VIEW test_continuous_agg_view + WITH (timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value, get_constant_no_perms() + FROM test_continuous_agg_table + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view', 100::integer, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +SELECT id AS job_id FROM _timescaledb_config.bgw_job ORDER BY id desc limit 1 \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +-- job fails +SELECT job_id, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | last_run_success | total_runs | 
total_successes | total_failures | total_crashes +--------+------------------+------------+-----------------+----------------+--------------- + 1002 | f | 1 | 0 | 1 | 0 +(1 row) + +DROP MATERIALIZED VIEW test_continuous_agg_view; +--advance clock to quit scheduler +SELECT ts_bgw_params_reset_time(extract(epoch from interval '25 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +select ts_bgw_db_scheduler_test_wait_for_scheduler_finish(); + ts_bgw_db_scheduler_test_wait_for_scheduler_finish +---------------------------------------------------- + +(1 row) + +SELECT ts_bgw_params_mock_wait_returns_immediately(:WAIT_ON_JOB); + ts_bgw_params_mock_wait_returns_immediately +--------------------------------------------- + +(1 row) + +--clear log for next run of the scheduler +TRUNCATE public.bgw_log; +SELECT ts_bgw_params_reset_time(); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +-- +-- Test creating continuous aggregate with a user that is the non-owner of the raw table +-- +CREATE TABLE test_continuous_agg_table_w_grant(time int, data int); +\if :IS_DISTRIBUTED +SELECT create_distributed_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_bgw_common.sql:330: NOTICE: adding not-null constraint to column "time" + create_distributed_hypertable +------------------------------------------------ + (5,public,test_continuous_agg_table_w_grant,t) +(1 row) + +\else +SELECT create_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10); +\endif +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table_w_grant $$; +\if :IS_DISTRIBUTED +CALL distributed_exec($DIST$ +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table_w_grant $$; +$DIST$); +\endif +SELECT set_integer_now_func('test_continuous_agg_table_w_grant', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +GRANT SELECT, TRIGGER ON test_continuous_agg_table_w_grant TO public; +INSERT INTO test_continuous_agg_table_w_grant + SELECT 1 , 1; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +-- make sure view can be created +CREATE MATERIALIZED VIEW test_continuous_agg_view_user_2 + WITH ( timescaledb.continuous, + timescaledb.materialized_only=true) + AS SELECT time_bucket('2', time), SUM(data) as value + FROM test_continuous_agg_table_w_grant + GROUP BY 1 WITH NO DATA; +SELECT add_continuous_aggregate_policy('test_continuous_agg_view_user_2', NULL, -2::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +SELECT id AS job_id FROM _timescaledb_config.bgw_job ORDER BY id desc limit 1 \gset +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT id, owner FROM _timescaledb_config.bgw_job WHERE id = :job_id ; + id | owner +------+--------------------- + 1003 | default_perm_user_2 +(1 row) + +SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | until_next | last_run_success | total_runs | total_successes | 
total_failures | total_crashes +--------+------------+------------------+------------+-----------------+----------------+--------------- + 1003 | @ 12 hours | t | 1 | 1 | 0 | 0 +(1 row) + +--view is populated +SELECT * FROM test_continuous_agg_view_user_2 ORDER BY 1; + time_bucket | value +-------------+------- + 0 | 1 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +--revoke permissions from the continuous agg view owner to select from raw table +--no further updates to cont agg should happen +REVOKE SELECT ON test_continuous_agg_table_w_grant FROM public; +--add new data to table +INSERT INTO test_continuous_agg_table_w_grant VALUES(5,1); +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +--advance time by 12h so that job tries to run one more time +SELECT ts_bgw_params_reset_time(extract(epoch from interval '12 hour')::bigint * 1000000, true); + ts_bgw_params_reset_time +-------------------------- + +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +--should show a failing execution because no longer has permissions (due to lack of permission on partial view owner's part) +SELECT job_id, last_run_success, total_runs, total_successes, total_failures, total_crashes + FROM _timescaledb_internal.bgw_job_stat + where job_id=:job_id; + job_id | last_run_success | total_runs | total_successes | total_failures | total_crashes +--------+------------------+------------+-----------------+----------------+--------------- + 1003 | f | 2 | 1 | 1 | 0 +(1 row) + +--view was NOT updated; but the old stuff is still there +SELECT * FROM test_continuous_agg_view_user_2; + time_bucket | value +-------------+------- + 0 | 1 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT * from sorted_bgw_log; + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + 0 | 0 | DB Scheduler | [TESTING] Registered new background worker + 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 0 | Refresh Continuous Aggregate Policy [1003] | refreshing continuous aggregate "test_continuous_agg_view_user_2" in window [ -2147483648, 2 ] + 1 | 0 | Refresh Continuous Aggregate Policy [1003] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_6" + 2 | 0 | Refresh Continuous Aggregate Policy [1003] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_6" + 3 | 0 | Refresh Continuous Aggregate Policy [1003] | job 1003 (Refresh Continuous Aggregate Policy [1003]) exiting with success: execution time (RANDOM) ms + 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker + 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) + 0 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | job 1003 threw an error + 1 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | permission denied for table test_continuous_agg_table_w_grant +(10 rows) + +-- Count the number of continuous aggregate policies +SELECT count(*) FROM _timescaledb_config.bgw_job + WHERE proc_schema = '_timescaledb_functions' + AND proc_name = 'policy_refresh_continuous_aggregate'; + count +------- + 1 +(1 row) + +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP 
DATABASE :DATA_NODE_1 WITH (FORCE); +DROP DATABASE :DATA_NODE_2 WITH (FORCE); +DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/cagg_ddl_dist_ht-13.out b/tsl/test/expected/cagg_ddl_dist_ht-13.out index 9f782583746..d2ea010eda9 100644 --- a/tsl/test/expected/cagg_ddl_dist_ht-13.out +++ b/tsl/test/expected/cagg_ddl_dist_ht-13.out @@ -74,6 +74,11 @@ SELECT table_name FROM create_hypertable('conditions', 'timec'); \endif -- schema tests \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +-- drop if the tablespace1 and/or tablespace2 exists +SET client_min_messages TO error; +DROP TABLESPACE IF EXISTS tablespace1; +DROP TABLESPACE IF EXISTS tablespace2; +RESET client_min_messages; CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; CREATE SCHEMA rename_schema; @@ -265,7 +270,7 @@ SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, -- drop_chunks tests DROP TABLE conditions CASCADE; DROP TABLE foo CASCADE; -psql:include/cagg_ddl_common.sql:161: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:166: NOTICE: drop cascades to 2 other objects CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); \if :IS_DISTRIBUTED SELECT hypertable_id AS drop_chunks_table_id @@ -331,7 +336,7 @@ SELECT * FROM drop_chunks_view ORDER BY 1; SELECT drop_chunks(:'drop_chunks_mat_table', newer_than => -20, verbose => true); -psql:include/cagg_ddl_common.sql:213: ERROR: operation not supported on materialized hypertable +psql:include/cagg_ddl_common.sql:218: ERROR: operation not supported on materialized hypertable \set ON_ERROR_STOP 1 SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; count @@ -355,8 +360,8 @@ SELECT * FROM drop_chunks_view ORDER BY 1; -- drop chunks when the chunksize and time_bucket aren't aligned DROP TABLE drop_chunks_table CASCADE; -psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to 2 other objects -psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk +psql:include/cagg_ddl_common.sql:227: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:227: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER); \if :IS_DISTRIBUTED SELECT hypertable_id AS drop_chunks_table_u_id @@ -423,7 +428,7 @@ TRUNCATE drop_chunks_table_u; \set ON_ERROR_STOP 0 -- Can't truncate materialized hypertables directly TRUNCATE :drop_chunks_mat_table_u; -psql:include/cagg_ddl_common.sql:271: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate +psql:include/cagg_ddl_common.sql:276: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate \set ON_ERROR_STOP 1 -- Check that we don't interfere with TRUNCATE of normal table and -- partitioned table @@ -450,31 +455,31 @@ SELECT * FROM truncate_partitioned; \set ON_ERROR_STOP 0 -- test a variety of ALTER TABLE statements ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name; -psql:include/cagg_ddl_common.sql:291: ERROR: renaming columns on materialization tables is not supported +psql:include/cagg_ddl_common.sql:296: ERROR: renaming columns on materialization tables is not supported ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket); -psql:include/cagg_ddl_common.sql:292: ERROR: operation not supported on materialization tables 
+psql:include/cagg_ddl_common.sql:297: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED;
-psql:include/cagg_ddl_common.sql:293: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:298: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY;
-psql:include/cagg_ddl_common.sql:294: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:299: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER;
-psql:include/cagg_ddl_common.sql:295: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:300: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket;
-psql:include/cagg_ddl_common.sql:296: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:301: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL;
-psql:include/cagg_ddl_common.sql:297: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:302: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1;
-psql:include/cagg_ddl_common.sql:298: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:303: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL;
-psql:include/cagg_ddl_common.sql:299: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:304: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL;
-psql:include/cagg_ddl_common.sql:300: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:305: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo;
-psql:include/cagg_ddl_common.sql:301: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:306: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u NOT OF;
-psql:include/cagg_ddl_common.sql:302: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:307: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER;
-psql:include/cagg_ddl_common.sql:303: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:308: ERROR:  operation not supported on materialization tables
 \set ON_ERROR_STOP 1
 ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public;
 ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name;
@@ -510,7 +515,7 @@ CREATE MATERIALIZED VIEW new_name_view
 AS SELECT time_bucket('6', time_bucket), COUNT("count")
    FROM new_name
    GROUP BY 1 WITH NO DATA;
-psql:include/cagg_ddl_common.sql:326: ERROR:  hypertable is a continuous aggregate materialization table
+psql:include/cagg_ddl_common.sql:331: ERROR:  hypertable is a continuous aggregate materialization table
 \set ON_ERROR_STOP 1
 CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, v2 float);
 \if :IS_DISTRIBUTED
@@ -552,10 +557,10 @@ SELECT * FROM cagg_expr ORDER BY time LIMIT 5;
 --test materialization of invalidation before drop
 DROP TABLE IF EXISTS drop_chunks_table CASCADE;
-psql:include/cagg_ddl_common.sql:358: NOTICE:  table "drop_chunks_table" does not exist, skipping
+psql:include/cagg_ddl_common.sql:363: NOTICE:  table "drop_chunks_table" does not exist, skipping
 DROP TABLE IF EXISTS drop_chunks_table_u CASCADE;
-psql:include/cagg_ddl_common.sql:359: NOTICE:  drop cascades to 2 other objects
-psql:include/cagg_ddl_common.sql:359: NOTICE:  drop cascades to table _timescaledb_internal._hyper_7_9_chunk
+psql:include/cagg_ddl_common.sql:364: NOTICE:  drop cascades to 2 other objects
+psql:include/cagg_ddl_common.sql:364: NOTICE:  drop cascades to table _timescaledb_internal._hyper_7_9_chunk
 CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER);
 \if :IS_DISTRIBUTED
 SELECT hypertable_id AS drop_chunks_table_nid
@@ -751,7 +756,7 @@ SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen,
 SELECT drop_chunks('drop_chunks_view',
        newer_than => -20,
        verbose => true);
-psql:include/cagg_ddl_common.sql:454: INFO:  dropping chunk _timescaledb_internal._hyper_11_17_chunk
+psql:include/cagg_ddl_common.sql:459: INFO:  dropping chunk _timescaledb_internal._hyper_11_17_chunk
                 drop_chunks                
 ------------------------------------------
  _timescaledb_internal._hyper_11_17_chunk
@@ -772,7 +777,7 @@ WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integ
 \set ON_ERROR_STOP 0
 \set VERBOSITY default
 SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60);
-psql:include/cagg_ddl_common.sql:466: ERROR:  operation not supported on materialized hypertable
+psql:include/cagg_ddl_common.sql:471: ERROR:  operation not supported on materialized hypertable
 DETAIL:  Hypertable "_materialized_hypertable_11" is a materialized hypertable.
 HINT:  Try the operation on the continuous aggregate instead.
 \set VERBOSITY terse
@@ -1031,9 +1036,9 @@ SELECT user_view,
 (2 rows)
 
 DROP MATERIALIZED VIEW whatever_view_1;
-psql:include/cagg_ddl_common.sql:644: NOTICE:  drop cascades to table _timescaledb_internal._hyper_13_24_chunk
+psql:include/cagg_ddl_common.sql:649: NOTICE:  drop cascades to table _timescaledb_internal._hyper_13_24_chunk
 DROP MATERIALIZED VIEW whatever_view_2;
-psql:include/cagg_ddl_common.sql:645: NOTICE:  drop cascades to table _timescaledb_internal._hyper_14_25_chunk
+psql:include/cagg_ddl_common.sql:650: NOTICE:  drop cascades to table _timescaledb_internal._hyper_14_25_chunk
 -- test bucket width expressions on integer hypertables
 CREATE TABLE metrics_int2 (
     time int2 NOT NULL,
@@ -1144,39 +1149,39 @@ CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.ma
 SELECT time_bucket(1::smallint, time)
 FROM metrics_int2
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:750: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:755: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1::smallint + 2::smallint, time)
 FROM metrics_int2
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:757: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:762: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 -- width expression for int4 hypertables
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1, time)
 FROM metrics_int4
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:765: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:770: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1 + 2, time)
 FROM metrics_int4
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:772: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:777: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 -- width expression for int8 hypertables
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1, time)
 FROM metrics_int8
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:780: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:785: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1 + 2, time)
 FROM metrics_int8
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:787: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:792: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 \set ON_ERROR_STOP 0
 -- non-immutable expresions should be rejected
@@ -1184,17 +1189,17 @@ CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.ma
 SELECT time_bucket(extract(year FROM now())::smallint, time)
 FROM metrics_int2
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:796: ERROR:  only immutable expressions allowed in time bucket function
+psql:include/cagg_ddl_common.sql:801: ERROR:  only immutable expressions allowed in time bucket function
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(extract(year FROM now())::int, time)
 FROM metrics_int4
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:801: ERROR:  only immutable expressions allowed in time bucket function
+psql:include/cagg_ddl_common.sql:806: ERROR:  only immutable expressions allowed in time bucket function
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(extract(year FROM now())::int, time)
 FROM metrics_int8
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:806: ERROR:  only immutable expressions allowed in time bucket function
+psql:include/cagg_ddl_common.sql:811: ERROR:  only immutable expressions allowed in time bucket function
 \set ON_ERROR_STOP 1
 -- Test various ALTER MATERIALIZED VIEW statements.
 SET ROLE :ROLE_DEFAULT_PERM_USER;
@@ -1221,7 +1226,7 @@ tablespace |
 -- we test that the normal checks are done when changing the owner.
 \set ON_ERROR_STOP 0
 ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1;
-psql:include/cagg_ddl_common.sql:826: ERROR:  must be member of role "test_role_1"
+psql:include/cagg_ddl_common.sql:831: ERROR:  must be member of role "test_role_1"
 \set ON_ERROR_STOP 1
 -- Superuser can always change owner
 SET ROLE :ROLE_CLUSTER_SUPERUSER;
@@ -1285,9 +1290,9 @@ AS SELECT time_bucket(7, time_int) as bucket,
 SUM(value), COUNT(value)
 FROM conditionsnm GROUP BY bucket WITH DATA;
-psql:include/cagg_ddl_common.sql:874: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
+psql:include/cagg_ddl_common.sql:879: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
 DROP materialized view conditionsnm_4;
-psql:include/cagg_ddl_common.sql:876: NOTICE:  drop cascades to table _timescaledb_internal._hyper_26_37_chunk
+psql:include/cagg_ddl_common.sql:881: NOTICE:  drop cascades to table _timescaledb_internal._hyper_26_37_chunk
 -- Case 2: DROP CASCADE should have similar behaviour as DROP
 CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@@ -1295,9 +1300,9 @@ AS SELECT time_bucket(7, time_int) as bucket,
 SUM(value), COUNT(value)
 FROM conditionsnm GROUP BY bucket WITH DATA;
-psql:include/cagg_ddl_common.sql:884: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
+psql:include/cagg_ddl_common.sql:889: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
 DROP materialized view conditionsnm_4 CASCADE;
-psql:include/cagg_ddl_common.sql:886: NOTICE:  drop cascades to table _timescaledb_internal._hyper_27_38_chunk
+psql:include/cagg_ddl_common.sql:891: NOTICE:  drop cascades to table _timescaledb_internal._hyper_27_38_chunk
 -- Case 3: require CASCADE in case of dependent object
 CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@@ -1305,16 +1310,16 @@ AS SELECT time_bucket(7, time_int) as bucket,
 SUM(value), COUNT(value)
 FROM conditionsnm GROUP BY bucket WITH DATA;
-psql:include/cagg_ddl_common.sql:894: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
+psql:include/cagg_ddl_common.sql:899: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
 CREATE VIEW see_cagg as select * from conditionsnm_4;
 \set ON_ERROR_STOP 0
 DROP MATERIALIZED VIEW conditionsnm_4;
-psql:include/cagg_ddl_common.sql:898: ERROR:  cannot drop view conditionsnm_4 because other objects depend on it
+psql:include/cagg_ddl_common.sql:903: ERROR:  cannot drop view conditionsnm_4 because other objects depend on it
 \set ON_ERROR_STOP 1
 -- Case 4: DROP CASCADE with dependency
 DROP MATERIALIZED VIEW conditionsnm_4 CASCADE;
-psql:include/cagg_ddl_common.sql:902: NOTICE:  drop cascades to view see_cagg
-psql:include/cagg_ddl_common.sql:902: NOTICE:  drop cascades to table _timescaledb_internal._hyper_28_39_chunk
+psql:include/cagg_ddl_common.sql:907: NOTICE:  drop cascades to view see_cagg
+psql:include/cagg_ddl_common.sql:907: NOTICE:  drop cascades to table _timescaledb_internal._hyper_28_39_chunk
 -- Test DROP SCHEMA CASCADE with continuous aggregates
 --
 -- Issue: #2350
@@ -1357,7 +1362,7 @@ WHERE user_view_name = 'telemetry_1s';
 \gset
 DROP SCHEMA test_schema CASCADE;
-psql:include/cagg_ddl_common.sql:941: NOTICE:  drop cascades to 4 other objects
+psql:include/cagg_ddl_common.sql:946: NOTICE:  drop cascades to 4 other objects
 SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME';
  count
 -------
@@ -1441,7 +1446,7 @@ WHERE user_view_name = 'cagg2';
 \gset
 DROP SCHEMA test_schema CASCADE;
-psql:include/cagg_ddl_common.sql:998: NOTICE:  drop cascades to 7 other objects
+psql:include/cagg_ddl_common.sql:1003: NOTICE:  drop cascades to 7 other objects
 SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1';
  count
 -------
@@ -1611,10 +1616,10 @@ CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL);
 \set ON_ERROR_STOP 0
 -- unique indexes are not supported
 CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location);
-psql:include/cagg_ddl_common.sql:1084: ERROR:  continuous aggregates do not support UNIQUE indexes
+psql:include/cagg_ddl_common.sql:1089: ERROR:  continuous aggregates do not support UNIQUE indexes
 -- concurrently index creation not supported
 CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg);
-psql:include/cagg_ddl_common.sql:1086: ERROR:  hypertables do not support concurrent index creation
+psql:include/cagg_ddl_common.sql:1091: ERROR:  hypertables do not support concurrent index creation
 \set ON_ERROR_STOP 1
 CREATE INDEX index_avg ON conditions_daily (avg);
 CREATE INDEX index_avg_only ON ONLY conditions_daily (avg);
@@ -1651,14 +1656,14 @@ CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.m
 AS SELECT search_query,count(search_query) as count, sum(cnt),
 time_bucket(INTERVAL '1 minute', time) AS bucket
 FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query;
-psql:include/cagg_ddl_common.sql:1108: NOTICE:  continuous aggregate "i3696_cagg1" is already up-to-date
+psql:include/cagg_ddl_common.sql:1113: NOTICE:  continuous aggregate "i3696_cagg1" is already up-to-date
 ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true');
 CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false)
 AS SELECT search_query,count(search_query) as count, sum(cnt),
 time_bucket(INTERVAL '1 minute', time) AS bucket
 FROM i3696 GROUP BY cnt + cnt2, bucket, search_query
 HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10;
-psql:include/cagg_ddl_common.sql:1116: NOTICE:  continuous aggregate "i3696_cagg2" is already up-to-date
+psql:include/cagg_ddl_common.sql:1121: NOTICE:  continuous aggregate "i3696_cagg2" is already up-to-date
 ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 'true');
 --TEST test with multiple settings on continuous aggregates --
 -- test for materialized_only + compress combinations (real time aggs enabled initially)
@@ -1675,7 +1680,7 @@ SELECT create_hypertable('test_setting', 'time');
 \endif
 CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false)
 AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1;
-psql:include/cagg_ddl_common.sql:1130: NOTICE:  continuous aggregate "test_setting_cagg" is already up-to-date
+psql:include/cagg_ddl_common.sql:1135: NOTICE:  continuous aggregate "test_setting_cagg" is already up-to-date
 INSERT INTO test_setting
 SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0;
 CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz);
@@ -1689,7 +1694,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
 INSERT INTO test_setting VALUES( '2020-11-01', 20);
 --try out 2 settings here --
 ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true');
-psql:include/cagg_ddl_common.sql:1141: NOTICE:  defaulting compress_orderby to time_bucket
+psql:include/cagg_ddl_common.sql:1146: NOTICE:  defaulting compress_orderby to time_bucket
 SELECT view_name, compression_enabled, materialized_only
 FROM timescaledb_information.continuous_aggregates
 where view_name = 'test_setting_cagg';
@@ -1707,7 +1712,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
 --now set it back to false --
 ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true');
-psql:include/cagg_ddl_common.sql:1149: NOTICE:  defaulting compress_orderby to time_bucket
+psql:include/cagg_ddl_common.sql:1154: NOTICE:  defaulting compress_orderby to time_bucket
 SELECT view_name, compression_enabled, materialized_only
 FROM timescaledb_information.continuous_aggregates
 where view_name = 'test_setting_cagg';
@@ -1759,10 +1764,10 @@ DELETE FROM test_setting WHERE val = 20;
 --TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially --
 -- test for materialized_only + compress combinations (real time aggs enabled initially)
 DROP MATERIALIZED VIEW test_setting_cagg;
-psql:include/cagg_ddl_common.sql:1174: NOTICE:  drop cascades to table _timescaledb_internal._hyper_40_47_chunk
+psql:include/cagg_ddl_common.sql:1179: NOTICE:  drop cascades to table _timescaledb_internal._hyper_40_47_chunk
 CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true)
 AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1;
-psql:include/cagg_ddl_common.sql:1177: NOTICE:  refreshing continuous aggregate "test_setting_cagg"
+psql:include/cagg_ddl_common.sql:1182: NOTICE:  refreshing continuous aggregate "test_setting_cagg"
 CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz);
 SELECT count(*) from test_setting_cagg ORDER BY 1;
  count
@@ -1774,7 +1779,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
 INSERT INTO test_setting VALUES( '2020-11-01', 20);
 --try out 2 settings here --
 ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true');
-psql:include/cagg_ddl_common.sql:1185: NOTICE:  defaulting compress_orderby to time_bucket
+psql:include/cagg_ddl_common.sql:1190: NOTICE:  defaulting compress_orderby to time_bucket
 SELECT view_name, compression_enabled, materialized_only
 FROM timescaledb_information.continuous_aggregates
 where view_name = 'test_setting_cagg';
@@ -1792,7 +1797,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
 --now set it back to false --
 ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true');
-psql:include/cagg_ddl_common.sql:1193: NOTICE:  defaulting compress_orderby to time_bucket
+psql:include/cagg_ddl_common.sql:1198: NOTICE:  defaulting compress_orderby to time_bucket
 SELECT view_name, compression_enabled, materialized_only
 FROM timescaledb_information.continuous_aggregates
 where view_name = 'test_setting_cagg';
@@ -1892,7 +1897,7 @@ SELECT time_bucket ('1 day', time) AS bucket,
 amount + sum(fiat_value)
 FROM transactions GROUP BY bucket, amount;
-psql:include/cagg_ddl_common.sql:1267: NOTICE:  refreshing continuous aggregate "cashflows"
+psql:include/cagg_ddl_common.sql:1272: NOTICE:  refreshing continuous aggregate "cashflows"
 SELECT h.table_name AS "MAT_TABLE_NAME",
        partial_view_name AS "PART_VIEW_NAME",
        direct_view_name AS "DIRECT_VIEW_NAME"
@@ -2084,8 +2089,8 @@ WHERE d.hypertable_id = ca.mat_hypertable_id;
 -- Since #6077 CAggs are materialized only by default
 DROP TABLE conditions CASCADE;
-psql:include/cagg_ddl_common.sql:1365: NOTICE:  drop cascades to 3 other objects
-psql:include/cagg_ddl_common.sql:1365: NOTICE:  drop cascades to 2 other objects
+psql:include/cagg_ddl_common.sql:1370: NOTICE:  drop cascades to 3 other objects
+psql:include/cagg_ddl_common.sql:1370: NOTICE:  drop cascades to 2 other objects
 CREATE TABLE conditions (
       time TIMESTAMPTZ NOT NULL,
       location TEXT NOT NULL,
diff --git a/tsl/test/expected/cagg_ddl_dist_ht-14.out b/tsl/test/expected/cagg_ddl_dist_ht-14.out
index 8c4e1394a03..d2ea010eda9 100644
--- a/tsl/test/expected/cagg_ddl_dist_ht-14.out
+++ b/tsl/test/expected/cagg_ddl_dist_ht-14.out
@@ -74,6 +74,11 @@ SELECT table_name FROM create_hypertable('conditions', 'timec');
 \endif
 -- schema tests
 \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
+-- drop if the tablespace1 and/or tablespace2 exists
+SET client_min_messages TO error;
+DROP TABLESPACE IF EXISTS tablespace1;
+DROP TABLESPACE IF EXISTS tablespace2;
+RESET client_min_messages;
 CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH;
 CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH;
 CREATE SCHEMA rename_schema;
@@ -265,7 +270,7 @@ SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name,
 -- drop_chunks tests
 DROP TABLE conditions CASCADE;
 DROP TABLE foo CASCADE;
-psql:include/cagg_ddl_common.sql:161: NOTICE:  drop cascades to 2 other objects
+psql:include/cagg_ddl_common.sql:166: NOTICE:  drop cascades to 2 other objects
 CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER);
 \if :IS_DISTRIBUTED
 SELECT hypertable_id AS drop_chunks_table_id
@@ -331,7 +336,7 @@ SELECT * FROM drop_chunks_view ORDER BY 1;
 SELECT drop_chunks(:'drop_chunks_mat_table', newer_than => -20, verbose => true);
-psql:include/cagg_ddl_common.sql:213: ERROR:  operation not supported on materialized hypertable
+psql:include/cagg_ddl_common.sql:218: ERROR:  operation not supported on materialized hypertable
 \set ON_ERROR_STOP 1
 SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
  count
@@ -355,8 +360,8 @@ SELECT * FROM drop_chunks_view ORDER BY 1;
 -- drop chunks when the chunksize and time_bucket aren't aligned
 DROP TABLE drop_chunks_table CASCADE;
-psql:include/cagg_ddl_common.sql:222: NOTICE:  drop cascades to 2 other objects
-psql:include/cagg_ddl_common.sql:222: NOTICE:  drop cascades to table _timescaledb_internal._hyper_5_4_chunk
+psql:include/cagg_ddl_common.sql:227: NOTICE:  drop cascades to 2 other objects
+psql:include/cagg_ddl_common.sql:227: NOTICE:  drop cascades to table _timescaledb_internal._hyper_5_4_chunk
 CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER);
 \if :IS_DISTRIBUTED
 SELECT hypertable_id AS drop_chunks_table_u_id
@@ -423,7 +428,7 @@ TRUNCATE drop_chunks_table_u;
 \set ON_ERROR_STOP 0
 -- Can't truncate materialized hypertables directly
 TRUNCATE :drop_chunks_mat_table_u;
-psql:include/cagg_ddl_common.sql:271: ERROR:  cannot TRUNCATE a hypertable underlying a continuous aggregate
+psql:include/cagg_ddl_common.sql:276: ERROR:  cannot TRUNCATE a hypertable underlying a continuous aggregate
 \set ON_ERROR_STOP 1
 -- Check that we don't interfere with TRUNCATE of normal table and
 -- partitioned table
@@ -450,31 +455,31 @@ SELECT * FROM truncate_partitioned;
 \set ON_ERROR_STOP 0
 -- test a variety of ALTER TABLE statements
 ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name;
-psql:include/cagg_ddl_common.sql:291: ERROR:  renaming columns on materialization tables is not supported
+psql:include/cagg_ddl_common.sql:296: ERROR:  renaming columns on materialization tables is not supported
 ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket);
-psql:include/cagg_ddl_common.sql:292: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:297: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED;
-psql:include/cagg_ddl_common.sql:293: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:298: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY;
-psql:include/cagg_ddl_common.sql:294: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:299: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER;
-psql:include/cagg_ddl_common.sql:295: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:300: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket;
-psql:include/cagg_ddl_common.sql:296: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:301: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL;
-psql:include/cagg_ddl_common.sql:297: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:302: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1;
-psql:include/cagg_ddl_common.sql:298: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:303: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL;
-psql:include/cagg_ddl_common.sql:299: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:304: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL;
-psql:include/cagg_ddl_common.sql:300: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:305: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo;
-psql:include/cagg_ddl_common.sql:301: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:306: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u NOT OF;
-psql:include/cagg_ddl_common.sql:302: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:307: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER;
-psql:include/cagg_ddl_common.sql:303: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:308: ERROR:  operation not supported on materialization tables
 \set ON_ERROR_STOP 1
 ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public;
 ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name;
@@ -510,7 +515,7 @@ CREATE MATERIALIZED VIEW new_name_view
 AS SELECT time_bucket('6', time_bucket), COUNT("count")
    FROM new_name
    GROUP BY 1 WITH NO DATA;
-psql:include/cagg_ddl_common.sql:326: ERROR:  hypertable is a continuous aggregate materialization table
+psql:include/cagg_ddl_common.sql:331: ERROR:  hypertable is a continuous aggregate materialization table
 \set ON_ERROR_STOP 1
 CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, v2 float);
 \if :IS_DISTRIBUTED
@@ -552,10 +557,10 @@ SELECT * FROM cagg_expr ORDER BY time LIMIT 5;
 --test materialization of invalidation before drop
 DROP TABLE IF EXISTS drop_chunks_table CASCADE;
-psql:include/cagg_ddl_common.sql:358: NOTICE:  table "drop_chunks_table" does not exist, skipping
+psql:include/cagg_ddl_common.sql:363: NOTICE:  table "drop_chunks_table" does not exist, skipping
 DROP TABLE IF EXISTS drop_chunks_table_u CASCADE;
-psql:include/cagg_ddl_common.sql:359: NOTICE:  drop cascades to 2 other objects
-psql:include/cagg_ddl_common.sql:359: NOTICE:  drop cascades to table _timescaledb_internal._hyper_7_9_chunk
+psql:include/cagg_ddl_common.sql:364: NOTICE:  drop cascades to 2 other objects
+psql:include/cagg_ddl_common.sql:364: NOTICE:  drop cascades to table _timescaledb_internal._hyper_7_9_chunk
 CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER);
 \if :IS_DISTRIBUTED
 SELECT hypertable_id AS drop_chunks_table_nid
@@ -659,7 +664,7 @@ SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1;
 (1 row)
 
 --we see the chunks row with the dropped flags set;
-SELECT * FROM _timescaledb_catalog.chunk where dropped;
+SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped;
  id | hypertable_id |      schema_name      |       table_name        | compressed_chunk_id | dropped | status | osm_chunk
 ----+---------------+-----------------------+-------------------------+---------------------+---------+--------+-----------
  13 |            10 | _timescaledb_internal | _dist_hyper_10_13_chunk |                     | t       |      0 | f
@@ -751,7 +756,7 @@ SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen,
 SELECT drop_chunks('drop_chunks_view',
        newer_than => -20,
        verbose => true);
-psql:include/cagg_ddl_common.sql:454: INFO:  dropping chunk _timescaledb_internal._hyper_11_17_chunk
+psql:include/cagg_ddl_common.sql:459: INFO:  dropping chunk _timescaledb_internal._hyper_11_17_chunk
                 drop_chunks                
 ------------------------------------------
  _timescaledb_internal._hyper_11_17_chunk
@@ -772,7 +777,7 @@ WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integ
 \set ON_ERROR_STOP 0
 \set VERBOSITY default
 SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60);
-psql:include/cagg_ddl_common.sql:466: ERROR:  operation not supported on materialized hypertable
+psql:include/cagg_ddl_common.sql:471: ERROR:  operation not supported on materialized hypertable
 DETAIL:  Hypertable "_materialized_hypertable_11" is a materialized hypertable.
 HINT:  Try the operation on the continuous aggregate instead.
 \set VERBOSITY terse
@@ -1031,9 +1036,9 @@ SELECT user_view,
 (2 rows)
 
 DROP MATERIALIZED VIEW whatever_view_1;
-psql:include/cagg_ddl_common.sql:644: NOTICE:  drop cascades to table _timescaledb_internal._hyper_13_24_chunk
+psql:include/cagg_ddl_common.sql:649: NOTICE:  drop cascades to table _timescaledb_internal._hyper_13_24_chunk
 DROP MATERIALIZED VIEW whatever_view_2;
-psql:include/cagg_ddl_common.sql:645: NOTICE:  drop cascades to table _timescaledb_internal._hyper_14_25_chunk
+psql:include/cagg_ddl_common.sql:650: NOTICE:  drop cascades to table _timescaledb_internal._hyper_14_25_chunk
 -- test bucket width expressions on integer hypertables
 CREATE TABLE metrics_int2 (
     time int2 NOT NULL,
@@ -1144,39 +1149,39 @@ CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.ma
 SELECT time_bucket(1::smallint, time)
 FROM metrics_int2
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:750: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:755: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1::smallint + 2::smallint, time)
 FROM metrics_int2
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:757: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:762: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 -- width expression for int4 hypertables
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1, time)
 FROM metrics_int4
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:765: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:770: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1 + 2, time)
 FROM metrics_int4
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:772: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:777: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 -- width expression for int8 hypertables
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1, time)
 FROM metrics_int8
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:780: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:785: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1 + 2, time)
 FROM metrics_int8
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:787: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:792: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 \set ON_ERROR_STOP 0
 -- non-immutable expresions should be rejected
@@ -1184,17 +1189,17 @@ CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.ma
 SELECT time_bucket(extract(year FROM now())::smallint, time)
 FROM metrics_int2
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:796: ERROR:  only immutable expressions allowed in time bucket function
+psql:include/cagg_ddl_common.sql:801: ERROR:  only immutable expressions allowed in time bucket function
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(extract(year FROM now())::int, time)
 FROM metrics_int4
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:801: ERROR:  only immutable expressions allowed in time bucket function
+psql:include/cagg_ddl_common.sql:806: ERROR:  only immutable expressions allowed in time bucket function
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(extract(year FROM now())::int, time)
 FROM metrics_int8
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:806: ERROR:  only immutable expressions allowed in time bucket function
+psql:include/cagg_ddl_common.sql:811: ERROR:  only immutable expressions allowed in time bucket function
 \set ON_ERROR_STOP 1
 -- Test various ALTER MATERIALIZED VIEW statements.
 SET ROLE :ROLE_DEFAULT_PERM_USER;
@@ -1221,7 +1226,7 @@ tablespace |
 -- we test that the normal checks are done when changing the owner.
 \set ON_ERROR_STOP 0
 ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1;
-psql:include/cagg_ddl_common.sql:826: ERROR:  must be member of role "test_role_1"
+psql:include/cagg_ddl_common.sql:831: ERROR:  must be member of role "test_role_1"
 \set ON_ERROR_STOP 1
 -- Superuser can always change owner
 SET ROLE :ROLE_CLUSTER_SUPERUSER;
@@ -1285,9 +1290,9 @@ AS SELECT time_bucket(7, time_int) as bucket,
 SUM(value), COUNT(value)
 FROM conditionsnm GROUP BY bucket WITH DATA;
-psql:include/cagg_ddl_common.sql:874: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
+psql:include/cagg_ddl_common.sql:879: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
 DROP materialized view conditionsnm_4;
-psql:include/cagg_ddl_common.sql:876: NOTICE:  drop cascades to table _timescaledb_internal._hyper_26_37_chunk
+psql:include/cagg_ddl_common.sql:881: NOTICE:  drop cascades to table _timescaledb_internal._hyper_26_37_chunk
 -- Case 2: DROP CASCADE should have similar behaviour as DROP
 CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@@ -1295,9 +1300,9 @@ AS SELECT time_bucket(7, time_int) as bucket,
 SUM(value), COUNT(value)
 FROM conditionsnm GROUP BY bucket WITH DATA;
-psql:include/cagg_ddl_common.sql:884: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
+psql:include/cagg_ddl_common.sql:889: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
 DROP materialized view conditionsnm_4 CASCADE;
-psql:include/cagg_ddl_common.sql:886: NOTICE:  drop cascades to table _timescaledb_internal._hyper_27_38_chunk
+psql:include/cagg_ddl_common.sql:891: NOTICE:  drop cascades to table _timescaledb_internal._hyper_27_38_chunk
 -- Case 3: require CASCADE in case of dependent object
 CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@@ -1305,16 +1310,16 @@ AS SELECT time_bucket(7, time_int) as bucket,
 SUM(value), COUNT(value)
 FROM conditionsnm GROUP BY bucket WITH DATA;
-psql:include/cagg_ddl_common.sql:894: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
+psql:include/cagg_ddl_common.sql:899: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
 CREATE VIEW see_cagg as select * from conditionsnm_4;
 \set ON_ERROR_STOP 0
 DROP MATERIALIZED VIEW conditionsnm_4;
-psql:include/cagg_ddl_common.sql:898: ERROR:  cannot drop view conditionsnm_4 because other objects depend on it
+psql:include/cagg_ddl_common.sql:903: ERROR:  cannot drop view conditionsnm_4 because other objects depend on it
 \set ON_ERROR_STOP 1
 -- Case 4: DROP CASCADE with dependency
 DROP MATERIALIZED VIEW conditionsnm_4 CASCADE;
-psql:include/cagg_ddl_common.sql:902: NOTICE:  drop cascades to view see_cagg
-psql:include/cagg_ddl_common.sql:902: NOTICE:  drop cascades to table _timescaledb_internal._hyper_28_39_chunk
+psql:include/cagg_ddl_common.sql:907: NOTICE:  drop cascades to view see_cagg
+psql:include/cagg_ddl_common.sql:907: NOTICE:  drop cascades to table _timescaledb_internal._hyper_28_39_chunk
 -- Test DROP SCHEMA CASCADE with continuous aggregates
 --
 -- Issue: #2350
@@ -1357,7 +1362,7 @@ WHERE user_view_name = 'telemetry_1s';
 \gset
 DROP SCHEMA test_schema CASCADE;
-psql:include/cagg_ddl_common.sql:941: NOTICE:  drop cascades to 4 other objects
+psql:include/cagg_ddl_common.sql:946: NOTICE:  drop cascades to 4 other objects
 SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME';
  count
 -------
@@ -1441,7 +1446,7 @@ WHERE user_view_name = 'cagg2';
 \gset
 DROP SCHEMA test_schema CASCADE;
-psql:include/cagg_ddl_common.sql:998: NOTICE:  drop cascades to 7 other objects
+psql:include/cagg_ddl_common.sql:1003: NOTICE:  drop cascades to 7 other objects
 SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1';
  count
 -------
@@ -1611,10 +1616,10 @@ CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL);
 \set ON_ERROR_STOP 0
 -- unique indexes are not supported
 CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location);
-psql:include/cagg_ddl_common.sql:1084: ERROR:  continuous aggregates do not support UNIQUE indexes
+psql:include/cagg_ddl_common.sql:1089: ERROR:  continuous aggregates do not support UNIQUE indexes
 -- concurrently index creation not supported
 CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg);
-psql:include/cagg_ddl_common.sql:1086: ERROR:  hypertables do not support concurrent index creation
+psql:include/cagg_ddl_common.sql:1091: ERROR:  hypertables do not support concurrent index creation
 \set ON_ERROR_STOP 1
 CREATE INDEX index_avg ON conditions_daily (avg);
 CREATE INDEX index_avg_only ON ONLY conditions_daily (avg);
@@ -1651,14 +1656,14 @@ CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.m
 AS SELECT search_query,count(search_query) as count, sum(cnt),
 time_bucket(INTERVAL '1 minute', time) AS bucket
 FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query;
-psql:include/cagg_ddl_common.sql:1108: NOTICE:  continuous aggregate "i3696_cagg1" is already up-to-date
+psql:include/cagg_ddl_common.sql:1113: NOTICE:  continuous aggregate "i3696_cagg1" is already up-to-date
 ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true');
 CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false)
 AS SELECT search_query,count(search_query) as count, sum(cnt),
 time_bucket(INTERVAL '1 minute', time) AS bucket
 FROM i3696 GROUP BY cnt + cnt2, bucket, search_query
 HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10;
-psql:include/cagg_ddl_common.sql:1116: NOTICE:  continuous aggregate "i3696_cagg2" is already up-to-date
+psql:include/cagg_ddl_common.sql:1121: NOTICE:  continuous aggregate "i3696_cagg2" is already up-to-date
 ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 'true');
 --TEST test with multiple settings on continuous aggregates --
 -- test for materialized_only + compress combinations (real time aggs enabled initially)
@@ -1675,7 +1680,7 @@ SELECT create_hypertable('test_setting', 'time');
 \endif
 CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false)
 AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1;
-psql:include/cagg_ddl_common.sql:1130: NOTICE:  continuous aggregate "test_setting_cagg" is already up-to-date
+psql:include/cagg_ddl_common.sql:1135: NOTICE:  continuous aggregate "test_setting_cagg" is already up-to-date
 INSERT INTO test_setting
 SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0;
 CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz);
@@ -1689,7 +1694,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
 INSERT INTO test_setting VALUES( '2020-11-01', 20);
 --try out 2 settings here --
 ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true');
-psql:include/cagg_ddl_common.sql:1141: NOTICE:  defaulting compress_orderby to time_bucket
+psql:include/cagg_ddl_common.sql:1146: NOTICE:  defaulting compress_orderby to time_bucket
 SELECT view_name, compression_enabled, materialized_only
 FROM timescaledb_information.continuous_aggregates
 where view_name = 'test_setting_cagg';
@@ -1707,7 +1712,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
 --now set it back to false --
 ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true');
-psql:include/cagg_ddl_common.sql:1149: NOTICE:  defaulting compress_orderby to time_bucket
+psql:include/cagg_ddl_common.sql:1154: NOTICE:  defaulting compress_orderby to time_bucket
 SELECT view_name, compression_enabled, materialized_only
 FROM timescaledb_information.continuous_aggregates
 where view_name = 'test_setting_cagg';
@@ -1759,10 +1764,10 @@ DELETE FROM test_setting WHERE val = 20;
 --TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially --
 -- test for materialized_only + compress combinations (real time aggs enabled initially)
 DROP MATERIALIZED VIEW test_setting_cagg;
-psql:include/cagg_ddl_common.sql:1174: NOTICE:  drop cascades to table _timescaledb_internal._hyper_40_47_chunk
+psql:include/cagg_ddl_common.sql:1179: NOTICE:  drop cascades to table _timescaledb_internal._hyper_40_47_chunk
 CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true)
 AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1;
-psql:include/cagg_ddl_common.sql:1177: NOTICE:  refreshing continuous aggregate "test_setting_cagg"
+psql:include/cagg_ddl_common.sql:1182: NOTICE:  refreshing continuous aggregate "test_setting_cagg"
 CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz);
 SELECT count(*) from test_setting_cagg ORDER BY 1;
  count
@@ -1774,7 +1779,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
 INSERT INTO test_setting VALUES( '2020-11-01', 20);
 --try out 2 settings here --
 ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true');
-psql:include/cagg_ddl_common.sql:1185: NOTICE:  defaulting compress_orderby to time_bucket
+psql:include/cagg_ddl_common.sql:1190: NOTICE:  defaulting compress_orderby to time_bucket
 SELECT view_name, compression_enabled, materialized_only
 FROM timescaledb_information.continuous_aggregates
 where view_name = 'test_setting_cagg';
@@ -1792,7 +1797,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
 --now set it back to false --
 ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true');
-psql:include/cagg_ddl_common.sql:1193: NOTICE:  defaulting compress_orderby to time_bucket
+psql:include/cagg_ddl_common.sql:1198: NOTICE:  defaulting compress_orderby to time_bucket
 SELECT view_name, compression_enabled, materialized_only
 FROM timescaledb_information.continuous_aggregates
 where view_name = 'test_setting_cagg';
@@ -1892,7 +1897,7 @@ SELECT time_bucket ('1 day', time) AS bucket,
 amount + sum(fiat_value)
 FROM transactions GROUP BY bucket, amount;
-psql:include/cagg_ddl_common.sql:1267: NOTICE:  refreshing continuous aggregate "cashflows"
+psql:include/cagg_ddl_common.sql:1272: NOTICE:  refreshing continuous aggregate "cashflows"
 SELECT h.table_name AS "MAT_TABLE_NAME",
        partial_view_name AS "PART_VIEW_NAME",
        direct_view_name AS "DIRECT_VIEW_NAME"
@@ -2084,8 +2089,8 @@ WHERE d.hypertable_id = ca.mat_hypertable_id;
 -- Since #6077 CAggs are materialized only by default
 DROP TABLE conditions CASCADE;
-psql:include/cagg_ddl_common.sql:1365: NOTICE:  drop cascades to 3 other objects
-psql:include/cagg_ddl_common.sql:1365: NOTICE:  drop cascades to 2 other objects
+psql:include/cagg_ddl_common.sql:1370: NOTICE:  drop cascades to 3 other objects
+psql:include/cagg_ddl_common.sql:1370: NOTICE:  drop cascades to 2 other objects
 CREATE TABLE conditions (
       time TIMESTAMPTZ NOT NULL,
      location TEXT NOT NULL,
diff --git a/tsl/test/expected/cagg_ddl_dist_ht-15.out b/tsl/test/expected/cagg_ddl_dist_ht-15.out
index 8c4e1394a03..d2ea010eda9 100644
--- a/tsl/test/expected/cagg_ddl_dist_ht-15.out
+++ b/tsl/test/expected/cagg_ddl_dist_ht-15.out
@@ -74,6 +74,11 @@ SELECT table_name FROM create_hypertable('conditions', 'timec');
 \endif
 -- schema tests
 \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
+-- drop if the tablespace1 and/or tablespace2 exists
+SET client_min_messages TO error;
+DROP TABLESPACE IF EXISTS tablespace1;
+DROP TABLESPACE IF EXISTS tablespace2;
+RESET client_min_messages;
 CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH;
 CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH;
 CREATE SCHEMA rename_schema;
@@ -265,7 +270,7 @@ SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name,
 -- drop_chunks tests
 DROP TABLE conditions CASCADE;
 DROP TABLE foo CASCADE;
-psql:include/cagg_ddl_common.sql:161: NOTICE:  drop cascades to 2 other objects
+psql:include/cagg_ddl_common.sql:166: NOTICE:  drop cascades to 2 other objects
 CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER);
 \if :IS_DISTRIBUTED
 SELECT hypertable_id AS drop_chunks_table_id
@@ -331,7 +336,7 @@ SELECT * FROM drop_chunks_view ORDER BY 1;
 SELECT drop_chunks(:'drop_chunks_mat_table', newer_than => -20, verbose => true);
-psql:include/cagg_ddl_common.sql:213: ERROR:  operation not supported on materialized hypertable
+psql:include/cagg_ddl_common.sql:218: ERROR:  operation not supported on materialized hypertable
 \set ON_ERROR_STOP 1
 SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
  count
@@ -355,8 +360,8 @@ SELECT * FROM drop_chunks_view ORDER BY 1;
 -- drop chunks when the chunksize and time_bucket aren't aligned
 DROP TABLE drop_chunks_table CASCADE;
-psql:include/cagg_ddl_common.sql:222: NOTICE:  drop cascades to 2 other objects
-psql:include/cagg_ddl_common.sql:222: NOTICE:  drop cascades to table _timescaledb_internal._hyper_5_4_chunk
+psql:include/cagg_ddl_common.sql:227: NOTICE:  drop cascades to 2 other objects
+psql:include/cagg_ddl_common.sql:227: NOTICE:  drop cascades to table _timescaledb_internal._hyper_5_4_chunk
 CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER);
 \if :IS_DISTRIBUTED
 SELECT hypertable_id AS drop_chunks_table_u_id
@@ -423,7 +428,7 @@ TRUNCATE drop_chunks_table_u;
 \set ON_ERROR_STOP 0
 -- Can't truncate materialized hypertables directly
 TRUNCATE :drop_chunks_mat_table_u;
-psql:include/cagg_ddl_common.sql:271: ERROR:  cannot TRUNCATE a hypertable underlying a continuous aggregate
+psql:include/cagg_ddl_common.sql:276: ERROR:  cannot TRUNCATE a hypertable underlying a continuous aggregate
 \set ON_ERROR_STOP 1
 -- Check that we don't interfere with TRUNCATE of normal table and
 -- partitioned table
@@ -450,31 +455,31 @@ SELECT * FROM truncate_partitioned;
 \set ON_ERROR_STOP 0
 -- test a variety of ALTER TABLE statements
 ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name;
-psql:include/cagg_ddl_common.sql:291: ERROR:  renaming columns on materialization tables is not supported
+psql:include/cagg_ddl_common.sql:296: ERROR:  renaming columns on materialization tables is not supported
 ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket);
-psql:include/cagg_ddl_common.sql:292: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:297: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED;
-psql:include/cagg_ddl_common.sql:293: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:298: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY;
-psql:include/cagg_ddl_common.sql:294: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:299: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER;
-psql:include/cagg_ddl_common.sql:295: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:300: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket;
-psql:include/cagg_ddl_common.sql:296: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:301: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL;
-psql:include/cagg_ddl_common.sql:297: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:302: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1;
-psql:include/cagg_ddl_common.sql:298: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:303: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL;
-psql:include/cagg_ddl_common.sql:299: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:304: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL;
-psql:include/cagg_ddl_common.sql:300: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:305: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo;
-psql:include/cagg_ddl_common.sql:301: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:306: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u NOT OF;
-psql:include/cagg_ddl_common.sql:302: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:307: ERROR:  operation not supported on materialization tables
 ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER;
-psql:include/cagg_ddl_common.sql:303: ERROR:  operation not supported on materialization tables
+psql:include/cagg_ddl_common.sql:308: ERROR:  operation not supported on materialization tables
 \set ON_ERROR_STOP 1
 ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public;
 ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name;
@@ -510,7 +515,7 @@ CREATE MATERIALIZED VIEW new_name_view
 AS SELECT time_bucket('6', time_bucket), COUNT("count")
    FROM new_name
    GROUP BY 1 WITH NO DATA;
-psql:include/cagg_ddl_common.sql:326: ERROR:  hypertable is a continuous aggregate materialization table
+psql:include/cagg_ddl_common.sql:331: ERROR:  hypertable is a continuous aggregate materialization table
 \set ON_ERROR_STOP 1
 CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, v2 float);
 \if :IS_DISTRIBUTED
@@ -552,10 +557,10 @@ SELECT * FROM cagg_expr ORDER BY time LIMIT 5;
 --test materialization of invalidation before drop
 DROP TABLE IF EXISTS drop_chunks_table CASCADE;
-psql:include/cagg_ddl_common.sql:358: NOTICE:  table "drop_chunks_table" does not exist, skipping
+psql:include/cagg_ddl_common.sql:363: NOTICE:  table "drop_chunks_table" does not exist, skipping
 DROP TABLE IF EXISTS drop_chunks_table_u CASCADE;
-psql:include/cagg_ddl_common.sql:359: NOTICE:  drop cascades to 2 other objects
-psql:include/cagg_ddl_common.sql:359: NOTICE:  drop cascades to table _timescaledb_internal._hyper_7_9_chunk
+psql:include/cagg_ddl_common.sql:364: NOTICE:  drop cascades to 2 other objects
+psql:include/cagg_ddl_common.sql:364: NOTICE:  drop cascades to table _timescaledb_internal._hyper_7_9_chunk
 CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER);
 \if :IS_DISTRIBUTED
 SELECT hypertable_id AS drop_chunks_table_nid
@@ -659,7 +664,7 @@ SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1;
 (1 row)
 
 --we see the chunks row with the dropped flags set;
-SELECT * FROM _timescaledb_catalog.chunk where dropped;
+SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped;
  id | hypertable_id |      schema_name      |       table_name        | compressed_chunk_id | dropped | status | osm_chunk
 ----+---------------+-----------------------+-------------------------+---------------------+---------+--------+-----------
  13 |            10 | _timescaledb_internal | _dist_hyper_10_13_chunk |                     | t       |      0 | f
@@ -751,7 +756,7 @@ SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen,
 SELECT drop_chunks('drop_chunks_view',
        newer_than => -20,
        verbose => true);
-psql:include/cagg_ddl_common.sql:454: INFO:  dropping chunk _timescaledb_internal._hyper_11_17_chunk
+psql:include/cagg_ddl_common.sql:459: INFO:  dropping chunk _timescaledb_internal._hyper_11_17_chunk
                 drop_chunks                
 ------------------------------------------
  _timescaledb_internal._hyper_11_17_chunk
@@ -772,7 +777,7 @@ WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integ
 \set ON_ERROR_STOP 0
 \set VERBOSITY default
 SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60);
-psql:include/cagg_ddl_common.sql:466: ERROR:  operation not supported on materialized hypertable
+psql:include/cagg_ddl_common.sql:471: ERROR:  operation not supported on materialized hypertable
 DETAIL:  Hypertable "_materialized_hypertable_11" is a materialized hypertable.
 HINT:  Try the operation on the continuous aggregate instead.
 \set VERBOSITY terse
@@ -1031,9 +1036,9 @@ SELECT user_view,
 (2 rows)
 
 DROP MATERIALIZED VIEW whatever_view_1;
-psql:include/cagg_ddl_common.sql:644: NOTICE:  drop cascades to table _timescaledb_internal._hyper_13_24_chunk
+psql:include/cagg_ddl_common.sql:649: NOTICE:  drop cascades to table _timescaledb_internal._hyper_13_24_chunk
 DROP MATERIALIZED VIEW whatever_view_2;
-psql:include/cagg_ddl_common.sql:645: NOTICE:  drop cascades to table _timescaledb_internal._hyper_14_25_chunk
+psql:include/cagg_ddl_common.sql:650: NOTICE:  drop cascades to table _timescaledb_internal._hyper_14_25_chunk
 -- test bucket width expressions on integer hypertables
 CREATE TABLE metrics_int2 (
     time int2 NOT NULL,
@@ -1144,39 +1149,39 @@ CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.ma
 SELECT time_bucket(1::smallint, time)
 FROM metrics_int2
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:750: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:755: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1::smallint + 2::smallint, time)
 FROM metrics_int2
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:757: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:762: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 -- width expression for int4 hypertables
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1, time)
 FROM metrics_int4
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:765: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:770: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1 + 2, time)
 FROM metrics_int4
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:772: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:777: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 -- width expression for int8 hypertables
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1, time)
 FROM metrics_int8
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:780: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:785: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(1 + 2, time)
 FROM metrics_int8
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:787: NOTICE:  continuous aggregate "width_expr" is already up-to-date
+psql:include/cagg_ddl_common.sql:792: NOTICE:  continuous aggregate "width_expr" is already up-to-date
 DROP MATERIALIZED VIEW width_expr;
 \set ON_ERROR_STOP 0
 -- non-immutable expresions should be rejected
@@ -1184,17 +1189,17 @@ CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.ma
 SELECT time_bucket(extract(year FROM now())::smallint, time)
 FROM metrics_int2
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:796: ERROR:  only immutable expressions allowed in time bucket function
+psql:include/cagg_ddl_common.sql:801: ERROR:  only immutable expressions allowed in time bucket function
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(extract(year FROM now())::int, time)
 FROM metrics_int4
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:801: ERROR:  only immutable expressions allowed in time bucket function
+psql:include/cagg_ddl_common.sql:806: ERROR:  only immutable expressions allowed in time bucket function
 CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
 SELECT time_bucket(extract(year FROM now())::int, time)
 FROM metrics_int8
 GROUP BY 1;
-psql:include/cagg_ddl_common.sql:806: ERROR:  only immutable expressions allowed in time bucket function
+psql:include/cagg_ddl_common.sql:811: ERROR:  only immutable expressions allowed in time bucket function
 \set ON_ERROR_STOP 1
 -- Test various ALTER MATERIALIZED VIEW statements.
 SET ROLE :ROLE_DEFAULT_PERM_USER;
@@ -1221,7 +1226,7 @@ tablespace |
 -- we test that the normal checks are done when changing the owner.
 \set ON_ERROR_STOP 0
 ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1;
-psql:include/cagg_ddl_common.sql:826: ERROR:  must be member of role "test_role_1"
+psql:include/cagg_ddl_common.sql:831: ERROR:  must be member of role "test_role_1"
 \set ON_ERROR_STOP 1
 -- Superuser can always change owner
 SET ROLE :ROLE_CLUSTER_SUPERUSER;
@@ -1285,9 +1290,9 @@ AS SELECT time_bucket(7, time_int) as bucket,
 SUM(value), COUNT(value)
 FROM conditionsnm GROUP BY bucket WITH DATA;
-psql:include/cagg_ddl_common.sql:874: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
+psql:include/cagg_ddl_common.sql:879: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
 DROP materialized view conditionsnm_4;
-psql:include/cagg_ddl_common.sql:876: NOTICE:  drop cascades to table _timescaledb_internal._hyper_26_37_chunk
+psql:include/cagg_ddl_common.sql:881: NOTICE:  drop cascades to table _timescaledb_internal._hyper_26_37_chunk
 -- Case 2: DROP CASCADE should have similar behaviour as DROP
 CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@@ -1295,9 +1300,9 @@ AS SELECT time_bucket(7, time_int) as bucket,
 SUM(value), COUNT(value)
 FROM conditionsnm GROUP BY bucket WITH DATA;
-psql:include/cagg_ddl_common.sql:884: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
+psql:include/cagg_ddl_common.sql:889: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
 DROP materialized view conditionsnm_4 CASCADE;
-psql:include/cagg_ddl_common.sql:886: NOTICE:  drop cascades to table _timescaledb_internal._hyper_27_38_chunk
+psql:include/cagg_ddl_common.sql:891: NOTICE:  drop cascades to table _timescaledb_internal._hyper_27_38_chunk
 -- Case 3: require CASCADE in case of dependent object
 CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@@ -1305,16 +1310,16 @@ AS SELECT time_bucket(7, time_int) as bucket,
 SUM(value), COUNT(value)
 FROM conditionsnm GROUP BY bucket WITH DATA;
-psql:include/cagg_ddl_common.sql:894: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
+psql:include/cagg_ddl_common.sql:899: NOTICE:  refreshing continuous aggregate "conditionsnm_4"
 CREATE VIEW see_cagg as select * from conditionsnm_4;
 \set ON_ERROR_STOP 0
 DROP MATERIALIZED VIEW conditionsnm_4;
-psql:include/cagg_ddl_common.sql:898: ERROR:  cannot drop view conditionsnm_4 because other objects depend on it
+psql:include/cagg_ddl_common.sql:903: ERROR:  cannot drop view conditionsnm_4 because other objects depend on it
 \set ON_ERROR_STOP 1
 -- Case 4: DROP CASCADE with dependency
 DROP MATERIALIZED VIEW conditionsnm_4 CASCADE;
-psql:include/cagg_ddl_common.sql:902: NOTICE:  drop cascades to view see_cagg
-psql:include/cagg_ddl_common.sql:902: NOTICE:  drop cascades to table _timescaledb_internal._hyper_28_39_chunk
+psql:include/cagg_ddl_common.sql:907: NOTICE:  drop cascades to view see_cagg
+psql:include/cagg_ddl_common.sql:907: NOTICE:  drop cascades to table _timescaledb_internal._hyper_28_39_chunk
 -- Test DROP SCHEMA CASCADE with continuous aggregates
 --
 -- Issue: #2350
@@ -1357,7 +1362,7 @@ WHERE user_view_name = 'telemetry_1s';
 \gset
 DROP SCHEMA test_schema CASCADE;
-psql:include/cagg_ddl_common.sql:941: NOTICE:  drop cascades to 4 other objects
+psql:include/cagg_ddl_common.sql:946: NOTICE:  drop cascades to 4 other objects
 SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME';
  count
 -------
@@ -1441,7 +1446,7 @@ WHERE user_view_name = 'cagg2';
 \gset
 DROP SCHEMA test_schema CASCADE;
-psql:include/cagg_ddl_common.sql:998: NOTICE:  drop cascades to 7 other objects
+psql:include/cagg_ddl_common.sql:1003: NOTICE:  drop cascades to 7 other objects
 SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1';
  count
 -------
@@ -1611,10 +1616,10 @@ CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL);
 \set ON_ERROR_STOP 0
 -- unique indexes are not supported
 CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location);
-psql:include/cagg_ddl_common.sql:1084: ERROR:  continuous aggregates do not support UNIQUE indexes
+psql:include/cagg_ddl_common.sql:1089: ERROR:  continuous aggregates do not support UNIQUE indexes
 -- concurrently index creation not supported
 CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg);
-psql:include/cagg_ddl_common.sql:1086: ERROR:  hypertables do not support concurrent index creation
+psql:include/cagg_ddl_common.sql:1091: ERROR:  hypertables do not support concurrent index creation
 \set ON_ERROR_STOP 1
 CREATE INDEX index_avg ON conditions_daily (avg);
 CREATE INDEX index_avg_only ON ONLY conditions_daily (avg);
@@ -1651,14 +1656,14 @@ CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.m
 AS SELECT search_query,count(search_query) as count, sum(cnt),
 time_bucket(INTERVAL '1 minute', time) AS bucket
 FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query;
-psql:include/cagg_ddl_common.sql:1108: NOTICE:  continuous aggregate "i3696_cagg1" is already up-to-date
+psql:include/cagg_ddl_common.sql:1113: NOTICE:  continuous aggregate "i3696_cagg1" is already up-to-date
 ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true');
 CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false)
 AS SELECT search_query,count(search_query) as count, sum(cnt),
 time_bucket(INTERVAL '1 minute', time) AS bucket
 FROM i3696 GROUP BY cnt + cnt2, bucket, search_query
 HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10;
-psql:include/cagg_ddl_common.sql:1116: NOTICE:  continuous aggregate "i3696_cagg2" is already up-to-date
+psql:include/cagg_ddl_common.sql:1121: NOTICE:  continuous aggregate "i3696_cagg2" is already up-to-date
 ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 'true');
 --TEST test with multiple settings on continuous aggregates --
 -- test for materialized_only + compress combinations (real time aggs enabled initially)
@@ -1675,7 +1680,7 @@ SELECT create_hypertable('test_setting', 'time');
 \endif
 CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false)
 AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1;
-psql:include/cagg_ddl_common.sql:1130: NOTICE:  continuous aggregate "test_setting_cagg" is already up-to-date
+psql:include/cagg_ddl_common.sql:1135: NOTICE:  continuous aggregate "test_setting_cagg" is already up-to-date
 INSERT INTO test_setting
 SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0;
 CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz);
@@ -1689,7 +1694,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
 INSERT INTO test_setting VALUES( '2020-11-01', 20);
 --try out 2 settings here --
 ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true');
-psql:include/cagg_ddl_common.sql:1141: NOTICE:  defaulting compress_orderby to time_bucket
+psql:include/cagg_ddl_common.sql:1146: NOTICE: defaulting compress_orderby to time_bucket SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1707,7 +1712,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; --now set it back to false -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1149: NOTICE: defaulting compress_orderby to time_bucket +psql:include/cagg_ddl_common.sql:1154: NOTICE: defaulting compress_orderby to time_bucket SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1759,10 +1764,10 @@ DELETE FROM test_setting WHERE val = 20; --TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- -- test for materialized_only + compress combinations (real time aggs enabled initially) DROP MATERIALIZED VIEW test_setting_cagg; -psql:include/cagg_ddl_common.sql:1174: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +psql:include/cagg_ddl_common.sql:1179: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; -psql:include/cagg_ddl_common.sql:1177: NOTICE: refreshing continuous aggregate "test_setting_cagg" +psql:include/cagg_ddl_common.sql:1182: NOTICE: refreshing continuous aggregate "test_setting_cagg" CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); SELECT count(*) from test_setting_cagg ORDER BY 1; count @@ -1774,7 +1779,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; INSERT INTO test_setting VALUES( '2020-11-01', 20); --try out 2 settings here -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1185: NOTICE: defaulting compress_orderby to time_bucket +psql:include/cagg_ddl_common.sql:1190: NOTICE: defaulting compress_orderby to time_bucket SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1792,7 +1797,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; --now set it back to false -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1193: NOTICE: defaulting compress_orderby to time_bucket +psql:include/cagg_ddl_common.sql:1198: NOTICE: defaulting compress_orderby to time_bucket SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1892,7 +1897,7 @@ SELECT time_bucket ('1 day', time) AS bucket, amount + sum(fiat_value) FROM transactions GROUP BY bucket, amount; -psql:include/cagg_ddl_common.sql:1267: NOTICE: refreshing continuous aggregate "cashflows" +psql:include/cagg_ddl_common.sql:1272: NOTICE: refreshing continuous aggregate "cashflows" SELECT h.table_name AS "MAT_TABLE_NAME", partial_view_name AS "PART_VIEW_NAME", direct_view_name AS "DIRECT_VIEW_NAME" @@ -2084,8 +2089,8 @@ WHERE d.hypertable_id = ca.mat_hypertable_id; -- Since #6077 CAggs are 
materialized only by default DROP TABLE conditions CASCADE; -psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 3 other objects -psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:1370: NOTICE: drop cascades to 3 other objects +psql:include/cagg_ddl_common.sql:1370: NOTICE: drop cascades to 2 other objects CREATE TABLE conditions ( time TIMESTAMPTZ NOT NULL, location TEXT NOT NULL, diff --git a/tsl/test/expected/dist_hypertable-16.out b/tsl/test/expected/dist_hypertable-16.out index 23f808ff7ba..bf21e134df3 100644 --- a/tsl/test/expected/dist_hypertable-16.out +++ b/tsl/test/expected/dist_hypertable-16.out @@ -891,9 +891,10 @@ FROM disttable ORDER BY device, temp; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Sort + Incremental Sort Output: disttable.device, disttable.temp, (avg(disttable.temp) OVER (?)) Sort Key: disttable.device, disttable.temp + Presorted Key: disttable.device -> WindowAgg Output: disttable.device, disttable.temp, avg(disttable.temp) OVER (?) -> Custom Scan (AsyncAppend) @@ -915,7 +916,7 @@ ORDER BY device, temp; Data node: db_dist_hypertable_3 Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_6_chunk Remote SQL: SELECT device, temp FROM public.disttable WHERE _timescaledb_functions.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY device ASC NULLS LAST -(24 rows) +(25 rows) SELECT device, temp, avg(temp) OVER (PARTITION BY device) FROM disttable diff --git a/tsl/test/expected/dist_param-13.out b/tsl/test/expected/dist_param-13.out new file mode 100644 index 00000000000..7809fd80f6f --- /dev/null +++ b/tsl/test/expected/dist_param-13.out @@ -0,0 +1,815 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Test parameterized data node scan. +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +\set DN_DBNAME_1 :TEST_DBNAME _1 +-- pg_regress doesn't drop these databases for repeated invocation such as in +-- the flaky check. +set client_min_messages to ERROR; +drop database if exists :"DN_DBNAME_1" with (force); +select 1 from add_data_node('data_node_1', host => 'localhost', + database => :'DN_DBNAME_1'); + ?column? +---------- + 1 +(1 row) + +grant usage on foreign server data_node_1 to public; +grant create on schema public to :ROLE_1; +set role :ROLE_1; +reset client_min_messages; +\set ON_ERROR_STOP 0 +-- helper function: float -> pseudorandom float [0..1]. +create or replace function mix(x float4) returns float4 as $$ select ((hashfloat4(x) / (pow(2., 31) - 1) + 1) / 2)::float4 $$ language sql; +-- distributed hypertable +create table metric_dist(ts timestamptz, id int, value float); +select create_distributed_hypertable('metric_dist', 'ts', 'id'); +WARNING: only one data node was assigned to the hypertable +NOTICE: adding not-null constraint to column "ts" + create_distributed_hypertable +------------------------------- + (1,public,metric_dist,t) +(1 row) + +insert into metric_dist + select '2022-02-02 02:02:02+03'::timestamptz + interval '1 year' * mix(x), + mix(x + 1.) * 20, + mix(x + 2.) 
* 50 + from generate_series(1, 1000000) x(x) +; +analyze metric_dist; +select count(*) from show_chunks('metric_dist'); + count +------- + 53 +(1 row) + +-- dictionary +create table metric_name(id int, name text collate "C", + constraint metric_name_name unique (name), + constraint metric_name_id primary key (id)); +insert into metric_name values (1, 'cpu1'), (3, 'cpu3'), (7, 'cpu7'); +insert into metric_name select x, 'other' || x + from generate_series(1000, 10000) x +; +analyze metric_name; +-- for predictable plans +set enable_hashagg to off; +set enable_material to off; +set enable_mergejoin to off; +-- not present on PG 12 +\set ECHO errors +-- Subquery + IN +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +explain (costs off, verbose) +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_dist.id, max(metric_dist.value), count(*) + Group Key: metric_dist.id + -> Sort + Output: metric_dist.id, metric_dist.value + Sort Key: metric_dist.id + -> Nested Loop + Output: metric_dist.id, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(17 rows) + +-- Check that the GUC to disable these plans works. Our cost model is very +-- heuristic and may often be wrong, so there needs to be a way to disable them.
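-- (Illustrative aside, not captured test output: the setting toggled below is
-- an ordinary PostgreSQL GUC, so besides the per-session SET it could also be
-- persisted, e.g. per database; the database name "mydb" is hypothetical.)
--   ALTER DATABASE mydb SET timescaledb.enable_parameterized_data_node_scan = false;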
+set timescaledb.enable_parameterized_data_node_scan to false; +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +explain (costs off, verbose) +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: metric_dist.id, max(metric_dist.value), count(*) + Group Key: metric_dist.id + -> Sort + Output: metric_dist.id, metric_dist.value + Sort Key: metric_dist.id + -> Hash Join + Output: metric_dist.id, metric_dist.value + Inner Unique: true + Hash Cond: (metric_dist.id = metric_name.id) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) ORDER BY id ASC NULLS LAST + -> Hash + Output: metric_name.id + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) +(21 rows) + +reset timescaledb.enable_parameterized_data_node_scan; +-- All fetcher types with join +set timescaledb.remote_data_fetcher = 'copy'; +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; +ERROR: cannot use COPY fetcher because the plan is parameterized +set timescaledb.remote_data_fetcher = 'cursor'; +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +set timescaledb.remote_data_fetcher = 'prepared'; +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +-- All fetcher types with initplan +set timescaledb.remote_data_fetcher = 'copy'; +select id, max(value), count(*) +from metric_dist +where id = any((select 
array_agg(id) from metric_name where name like 'cpu%')::int[]) + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; +ERROR: cannot use COPY fetcher because the plan is parameterized +set timescaledb.remote_data_fetcher = 'cursor'; +select id, max(value), count(*) +from metric_dist +where id = any((select array_agg(id) from metric_name where name like 'cpu%')::int[]) + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +set timescaledb.remote_data_fetcher = 'prepared'; +select id, max(value), count(*) +from metric_dist +where id = any((select array_agg(id) from metric_name where name like 'cpu%')::int[]) + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +-- Should prefer prepared statement data fetcher for these queries. +set timescaledb.remote_data_fetcher = 'auto'; +explain (analyze, verbose, costs off, timing off, summary off) +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=3 loops=1) + Output: metric_dist.id, max(metric_dist.value), count(*) + Group Key: metric_dist.id + -> Sort (actual rows=12609 loops=1) + Output: metric_dist.id, metric_dist.value + Sort Key: metric_dist.id + Sort Method: quicksort + -> Nested Loop (actual rows=12609 loops=1) + Output: metric_dist.id, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name (actual rows=3 loops=1) + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist (actual rows=4203 loops=3) + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Fetcher Type: Prepared statement + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(19 rows) + +-- Should reset the prepared cache mode after using the prepared statement fetcher. 
+call distributed_exec('create or replace procedure assert_auto_plan_cache_mode() as $$ begin assert (select setting from pg_settings where name = ''plan_cache_mode'') = ''auto''; end; $$ language plpgsql;'); +call distributed_exec('call assert_auto_plan_cache_mode();'); +-- Shippable EC join +select name, max(value), count(*) +from metric_dist join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + name | max | count +------+------------------+------- + cpu1 | 49.9941974878311 | 4174 + cpu3 | 49.9958902597427 | 4119 + cpu7 | 49.9881327152252 | 4316 +(3 rows) + +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Nested Loop + Output: metric_name.name, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(14 rows) + +-- Shipping still might make sense if the local table is outer. 
+explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist right join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Nested Loop + Output: metric_name.name, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(14 rows) + +-- Shipping doesn't make sense if the distributed table is outer. +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist +left join (select * from metric_name where name like 'cpu%') t using (id) +where ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Sort + Output: metric_name.name, metric_dist.value + Sort Key: metric_name.name COLLATE "C" + -> Hash Left Join + Output: metric_name.name, metric_dist.value + Inner Unique: true + Hash Cond: (metric_dist.id = metric_name.id) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) + -> Hash + Output: metric_name.name, metric_name.id + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.name, metric_name.id + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) +(21 rows) + +-- Non-shippable EC join +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name on name = concat('cpu', metric_dist.id) +where metric_name.name like 'cpu%' + and ts 
between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Sort + Output: metric_name.name, metric_dist.value + Sort Key: metric_name.name COLLATE "C" + -> Hash Join + Output: metric_name.name, metric_dist.value + Inner Unique: true + Hash Cond: ((concat('cpu', metric_dist.id))::text = metric_name.name) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) + -> Hash + Output: metric_name.name + -> Index Only Scan using metric_name_name on public.metric_name + Output: metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) +(21 rows) + +-- Shippable non-EC join. The weird condition is to only use immutable functions +-- that can be shipped to the remote node. `id::text` does CoerceViaIO which is +-- not generally shippable. And `int4out` returns cstring, not text, that's why +-- the `textin` is needed. 
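-- (Illustrative aside, not captured test output: the pair round-trips an int
-- to text through cstring using only immutable, shippable functions, and is
-- equivalent in result to the non-shippable id::text coercion, e.g.:)
--   select 'cpu' || textin(int4out(7));  -- 'cpu7', same value as 'cpu' || 7::text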
+select name, max(value), count(*) +from metric_dist join metric_name + on texteq('cpu' || textin(int4out(metric_dist.id)), name) +where metric_name.name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + name | max | count +------+------------------+------- + cpu1 | 49.9941974878311 | 4174 + cpu3 | 49.9958902597427 | 4119 + cpu7 | 49.9881327152252 | 4316 +(3 rows) + +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name + on texteq('cpu' || textin(int4out(metric_dist.id)), name) +where metric_name.name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Nested Loop + Output: metric_name.name, metric_dist.value + -> Index Only Scan using metric_name_name on public.metric_name + Output: metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (texteq(('cpu'::text || textin(int4out(id))), $1::text)) +(14 rows) + +-- Non-shippable non-EC join. 
+explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name + on texteq(concat('cpu', textin(int4out(metric_dist.id))), name) +where metric_name.name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Nested Loop + Output: metric_name.name, metric_dist.value + Join Filter: texteq(concat('cpu', textin(int4out(metric_dist.id))), metric_name.name) + -> Index Only Scan using metric_name_name on public.metric_name + Output: metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) +(15 rows) + +-- distinct on, order by, limit 1, with subquery +select distinct on (id) + id, ts, value +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by id, ts, value +limit 1 +; + id | ts | value +----+----------------------------------+------------------ + 1 | Tue Feb 01 15:03:56.048 2022 PST | 36.1639380455017 +(1 row) + +explain (costs off, verbose) +select distinct on (id) + id, ts, value +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by id, ts, value +limit 1 +; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: metric_dist.id, metric_dist.ts, metric_dist.value + -> Unique + Output: metric_dist.id, metric_dist.ts, metric_dist.value + -> Sort + Output: metric_dist.id, metric_dist.ts, metric_dist.value + Sort Key: metric_dist.id, metric_dist.ts, metric_dist.value + -> Nested Loop + Output: metric_dist.id, metric_dist.ts, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.ts, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT ts, id, value FROM 
public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(18 rows) + +-- distinct on, order by, limit 1, with explicit join +select distinct on (name) + name, ts, value +from metric_dist join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by name, ts, value +limit 1 +; + name | ts | value +------+----------------------------------+------------------ + cpu1 | Tue Feb 01 15:03:56.048 2022 PST | 36.1639380455017 +(1 row) + +explain (costs off, verbose) +select distinct on (name) + name, ts, value +from metric_dist join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by name, ts, value +limit 1 +; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: metric_name.name, metric_dist.ts, metric_dist.value + -> Unique + Output: metric_name.name, metric_dist.ts, metric_dist.value + -> Sort + Output: metric_name.name, metric_dist.ts, metric_dist.value + Sort Key: metric_name.name COLLATE "C", metric_dist.ts, metric_dist.value + -> Nested Loop + Output: metric_name.name, metric_dist.ts, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.ts, metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT ts, id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(18 rows) + +-- If there are a lot of rows chosen from the local table, the parameterized +-- nested loop might download the entire dist table or even more than that (in +-- the case of a non-equi-join or duplicate join keys). +-- Check that the parameterized plan is not chosen in this case.
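-- (Illustrative aside, not captured test output: rough arithmetic for the
-- intuition above. metric_name holds 3 + 9001 = 9004 rows here, so a
-- parameterized nested loop would issue one remote query per dictionary row;
-- even at a few matching rows per query, that re-reads far more data than the
-- single remote scan feeding the hash join in the plan below.)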
+explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist +join metric_name using (id) +where ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Sort + Output: metric_name.name, metric_dist.value + Sort Key: metric_name.name COLLATE "C" + -> Hash Join + Output: metric_name.name, metric_dist.value + Inner Unique: true + Hash Cond: (metric_dist.id = metric_name.id) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) + -> Hash + Output: metric_name.name, metric_name.id + -> Seq Scan on public.metric_name + Output: metric_name.name, metric_name.id +(19 rows) + +-- An interesting special case is when the remote SQL has a parameter, but it is +-- the result of an initplan. It's not "parameterized" in the join sense, because +-- there is only one param value. This is the most efficient plan for querying a +-- small number of ids. +explain (costs off, verbose) +select id, max(value) +from metric_dist +where id = any((select array_agg(id) from metric_name where name like 'cpu%')::int[]) + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_dist.id, max(metric_dist.value) + Group Key: metric_dist.id + InitPlan 1 (returns $0) + -> Aggregate + Output: array_agg(metric_name.id) + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND ((id = ANY ($1::integer[]))) ORDER BY id ASC NULLS LAST +(15 rows) + +-- Multiple joins. Test both EC and non-EC (texteq) join in one query. 
+create table metric_location(id int, location text); +insert into metric_location values (1, 'Yerevan'), (3, 'Dilijan'), (7, 'Stepanakert'); +analyze metric_location; +select id, max(value) +from metric_dist natural join metric_location natural join metric_name +where name like 'cpu%' and texteq(location, 'Yerevan') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +; + id | max +----+------------------ + 1 | 49.9941974878311 +(1 row) + +explain (costs off, verbose) +select id, max(value) +from metric_dist natural join metric_location natural join metric_name +where name like 'cpu%' and texteq(location, 'Yerevan') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_dist.id, max(metric_dist.value) + Group Key: metric_dist.id + -> Sort + Output: metric_dist.id, metric_dist.value + Sort Key: metric_dist.id + -> Nested Loop + Output: metric_dist.id, metric_dist.value + -> Nested Loop + Output: metric_location.id, metric_name.id + Inner Unique: true + Join Filter: (metric_location.id = metric_name.id) + -> Seq Scan on public.metric_location + Output: metric_location.id, metric_location.location + Filter: texteq(metric_location.location, 'Yerevan'::text) + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(24 rows) + +-- Multiple joins on different variables. Use a table instead of a CTE for saner +-- stats. 
+create table max_value_times as +select distinct on (id) id, ts from metric_dist +where ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by id, value desc +; +analyze max_value_times; +explain (costs off, verbose) +select id, value +from metric_dist natural join max_value_times natural join metric_name +where name like 'cpu%' +order by 1 +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: metric_dist.id, metric_dist.value + -> Nested Loop + Output: max_value_times.ts, max_value_times.id, metric_name.id + Join Filter: (max_value_times.id = metric_name.id) + -> Index Scan using metric_name_id on public.metric_name + Output: metric_name.id, metric_name.name + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Seq Scan on public.max_value_times + Output: max_value_times.id, max_value_times.ts + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value, metric_dist.ts + Data node: data_node_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk, _dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT ts, id, value FROM 
public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) AND (($1::timestamp with time zone = ts)) AND (($2::integer = id)) +(15 rows) + +-- Two distributed hypertables, each joined to the reference table and all joined together. +-- The query finds the percentage of time points where one metric is higher than +-- another, and is also tweaked not to use initplans. Requires a hash join. +explain (analyze, verbose, costs off, timing off, summary off) +select count(*) filter (where m1.value > m2.value) / count(*) +from metric_dist m1 +join metric_dist m2 using (ts) +where m1.id in (select id from metric_name where name = 'cpu1') + and m2.id in (select id from metric_name where name = 'cpu3') +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + Output: (count(*) FILTER (WHERE (m1.value > m2.value)) / count(*)) + -> Hash Join (actual rows=91 loops=1) + Output: m1.value, m2.value + Hash Cond: (m1.ts = m2.ts) + -> Nested Loop (actual rows=50037 loops=1) + Output: m1.value, m1.ts + -> Index Scan using metric_name_name on public.metric_name (actual rows=1 loops=1) + Output: metric_name.id, metric_name.name + Index Cond: (metric_name.name = 'cpu1'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist m1 (actual rows=50037 loops=1) + Output: m1.value, m1.ts, m1.id + Data node: data_node_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk,
_dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT ts, id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) AND (($1::integer = id)) + -> Hash (actual rows=50101 loops=1) + Output: m2.value, m2.ts + Buckets: 65536 (originally 1024) Batches: 1 (originally 1) + -> Nested Loop (actual rows=50101 loops=1) + Output: m2.value, m2.ts + -> Index Scan using metric_name_name on public.metric_name metric_name_1 (actual rows=1 loops=1) + Output: metric_name_1.id, metric_name_1.name + Index Cond: (metric_name_1.name = 'cpu3'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist m2 (actual rows=50101 loops=1) + Output: m2.value, m2.ts, m2.id + Data node: data_node_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk, _dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT ts, id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) AND (($1::integer = id)) +(30 rows) + +-- Should prefer reference table join pushdown to all other kinds of plans, +-- basically always. Note that we don't actually replicate the ref table here, +-- so EXPLAIN ANALYZE would fail. 
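-- (Illustrative aside, not captured test output: reference_tables is an
-- FDW-level option taking a comma-separated list of table names; besides the
-- ADD used just below, it can be amended or removed with the standard
-- options actions, e.g.:)
--   alter foreign data wrapper timescaledb_fdw options (set reference_tables 'metric_name, metric_location');
--   alter foreign data wrapper timescaledb_fdw options (drop reference_tables);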
+set role :ROLE_CLUSTER_SUPERUSER; +alter foreign data wrapper timescaledb_fdw options (add reference_tables 'metric_name'); +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name using (id) +where name like 'cpu%' +group by name +order by name +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Result + Output: metric_name.name, metric_dist.value + -> Custom Scan (DataNodeScan) + Output: metric_dist.value, metric_name.name + Data node: data_node_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk, _dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT r57.value, r2.name FROM (public.metric_dist r57 INNER JOIN public.metric_name r2 ON (((r57.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r57, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) ORDER 
BY r2.name ASC NULLS LAST +(10 rows) + +set timescaledb.enable_parameterized_data_node_scan to false; +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name using (id) +where name like 'cpu%' +group by name +order by name +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Result + Output: metric_name.name, metric_dist.value + -> Custom Scan (DataNodeScan) + Output: metric_dist.value, metric_name.name + Data node: data_node_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk, _dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT r57.value, r2.name FROM (public.metric_dist r57 INNER JOIN public.metric_name r2 ON (((r57.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r57, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) ORDER BY r2.name ASC NULLS 
LAST +(10 rows) + diff --git a/tsl/test/expected/dist_param-14.out b/tsl/test/expected/dist_param-14.out new file mode 100644 index 00000000000..7809fd80f6f --- /dev/null +++ b/tsl/test/expected/dist_param-14.out @@ -0,0 +1,815 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Test parameterized data node scan. +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +\set DN_DBNAME_1 :TEST_DBNAME _1 +-- pg_regress doesn't drop these databases for repeated invocation such as in +-- the flaky check. +set client_min_messages to ERROR; +drop database if exists :"DN_DBNAME_1" with (force); +select 1 from add_data_node('data_node_1', host => 'localhost', + database => :'DN_DBNAME_1'); + ?column? +---------- + 1 +(1 row) + +grant usage on foreign server data_node_1 to public; +grant create on schema public to :ROLE_1; +set role :ROLE_1; +reset client_min_messages; +\set ON_ERROR_STOP 0 +-- helper function: float -> pseudorandom float [0..1]. +create or replace function mix(x float4) returns float4 as $$ select ((hashfloat4(x) / (pow(2., 31) - 1) + 1) / 2)::float4 $$ language sql; +-- distributed hypertable +create table metric_dist(ts timestamptz, id int, value float); +select create_distributed_hypertable('metric_dist', 'ts', 'id'); +WARNING: only one data node was assigned to the hypertable +NOTICE: adding not-null constraint to column "ts" + create_distributed_hypertable +------------------------------- + (1,public,metric_dist,t) +(1 row) + +insert into metric_dist + select '2022-02-02 02:02:02+03'::timestamptz + interval '1 year' * mix(x), + mix(x + 1.) * 20, + mix(x + 2.) * 50 + from generate_series(1, 1000000) x(x) +; +analyze metric_dist; +select count(*) from show_chunks('metric_dist'); + count +------- + 53 +(1 row) + +-- dictionary +create table metric_name(id int, name text collate "C", + constraint metric_name_name unique (name), + constraint metric_name_id primary key (id)); +insert into metric_name values (1, 'cpu1'), (3, 'cpu3'), (7, 'cpu7'); +insert into metric_name select x, 'other' || x + from generate_series(1000, 10000) x +; +analyze metric_name; +-- for predictable plans +set enable_hashagg to off; +set enable_material to off; +set enable_mergejoin to off; +-- not present on PG 12 +\set ECHO errors +-- Subquery + IN +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +explain (costs off, verbose) +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_dist.id, max(metric_dist.value), count(*) + Group Key: metric_dist.id + -> Sort + Output: metric_dist.id, metric_dist.value + Sort Key: metric_dist.id + -> Nested Loop + 
Output: metric_dist.id, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(17 rows) + +-- Check that the GUC to disable these plans works. Our cost model is very +-- heuristic and may often be wrong, so there needs to be a way to disable them. +set timescaledb.enable_parameterized_data_node_scan to false; +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +explain (costs off, verbose) +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: metric_dist.id, max(metric_dist.value), count(*) + Group Key: metric_dist.id + -> Sort + Output: metric_dist.id, metric_dist.value + Sort Key: metric_dist.id + -> Hash Join + Output: metric_dist.id, metric_dist.value + Inner Unique: true + Hash Cond: (metric_dist.id = metric_name.id) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) ORDER BY id ASC NULLS LAST + -> Hash + Output: metric_name.id + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) +(21 rows) + +reset timescaledb.enable_parameterized_data_node_scan; +-- All fetcher types with join +set timescaledb.remote_data_fetcher = 'copy'; +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; +ERROR: cannot use COPY fetcher because the plan is parameterized
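+-- The COPY fetcher streams the entire remote result in a single pass, so it
+-- cannot re-bind $1 for each outer row of the nested loop the way the cursor
+-- and prepared statement fetchers can. A minimal sketch of the usual way out,
+-- assuming only the GUC already exercised in this test: let the planner pick a
+-- fetcher that is compatible with the plan.
+set timescaledb.remote_data_fetcher = 'auto';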
+set timescaledb.remote_data_fetcher = 'cursor'; +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +set timescaledb.remote_data_fetcher = 'prepared'; +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +-- All fetcher types with initplan +set timescaledb.remote_data_fetcher = 'copy'; +select id, max(value), count(*) +from metric_dist +where id = any((select array_agg(id) from metric_name where name like 'cpu%')::int[]) + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; +ERROR: cannot use COPY fetcher because the plan is parameterized +set timescaledb.remote_data_fetcher = 'cursor'; +select id, max(value), count(*) +from metric_dist +where id = any((select array_agg(id) from metric_name where name like 'cpu%')::int[]) + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +set timescaledb.remote_data_fetcher = 'prepared'; +select id, max(value), count(*) +from metric_dist +where id = any((select array_agg(id) from metric_name where name like 'cpu%')::int[]) + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +-- Should prefer prepared statement data fetcher for these queries. 
+set timescaledb.remote_data_fetcher = 'auto'; +explain (analyze, verbose, costs off, timing off, summary off) +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=3 loops=1) + Output: metric_dist.id, max(metric_dist.value), count(*) + Group Key: metric_dist.id + -> Sort (actual rows=12609 loops=1) + Output: metric_dist.id, metric_dist.value + Sort Key: metric_dist.id + Sort Method: quicksort + -> Nested Loop (actual rows=12609 loops=1) + Output: metric_dist.id, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name (actual rows=3 loops=1) + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist (actual rows=4203 loops=3) + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Fetcher Type: Prepared statement + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(19 rows) + +-- Should reset the prepared cache mode after using the prepared statement fetcher. 
+call distributed_exec('create or replace procedure assert_auto_plan_cache_mode() as $$ begin assert (select setting from pg_settings where name = ''plan_cache_mode'') = ''auto''; end; $$ language plpgsql;'); +call distributed_exec('call assert_auto_plan_cache_mode();'); +-- Shippable EC join +select name, max(value), count(*) +from metric_dist join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + name | max | count +------+------------------+------- + cpu1 | 49.9941974878311 | 4174 + cpu3 | 49.9958902597427 | 4119 + cpu7 | 49.9881327152252 | 4316 +(3 rows) + +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Nested Loop + Output: metric_name.name, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(14 rows) + +-- Shipping still might make sense if the local table is outer. 
+explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist right join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Nested Loop + Output: metric_name.name, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(14 rows) + +-- Shipping doesn't make sense if the distributed table is outer. +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist +left join (select * from metric_name where name like 'cpu%') t using (id) +where ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Sort + Output: metric_name.name, metric_dist.value + Sort Key: metric_name.name COLLATE "C" + -> Hash Left Join + Output: metric_name.name, metric_dist.value + Inner Unique: true + Hash Cond: (metric_dist.id = metric_name.id) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) + -> Hash + Output: metric_name.name, metric_name.id + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.name, metric_name.id + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) +(21 rows) + +-- Non-shippable EC join +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name on name = concat('cpu', metric_dist.id) +where metric_name.name like 'cpu%' + and ts 
between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Sort + Output: metric_name.name, metric_dist.value + Sort Key: metric_name.name COLLATE "C" + -> Hash Join + Output: metric_name.name, metric_dist.value + Inner Unique: true + Hash Cond: ((concat('cpu', metric_dist.id))::text = metric_name.name) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) + -> Hash + Output: metric_name.name + -> Index Only Scan using metric_name_name on public.metric_name + Output: metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) +(21 rows) + +-- Shippable non-EC join. The unusual condition is there to use only immutable functions +-- that can be shipped to the remote node. `id::text` does CoerceViaIO, which is +-- not generally shippable. And `int4out` returns cstring, not text, which is why +-- the `textin` is needed.
+select name, max(value), count(*) +from metric_dist join metric_name + on texteq('cpu' || textin(int4out(metric_dist.id)), name) +where metric_name.name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + name | max | count +------+------------------+------- + cpu1 | 49.9941974878311 | 4174 + cpu3 | 49.9958902597427 | 4119 + cpu7 | 49.9881327152252 | 4316 +(3 rows) + +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name + on texteq('cpu' || textin(int4out(metric_dist.id)), name) +where metric_name.name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Nested Loop + Output: metric_name.name, metric_dist.value + -> Index Only Scan using metric_name_name on public.metric_name + Output: metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (texteq(('cpu'::text || textin(int4out(id))), $1::text)) +(14 rows) + +-- Non-shippable non-EC join. 
+explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name + on texteq(concat('cpu', textin(int4out(metric_dist.id))), name) +where metric_name.name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Nested Loop + Output: metric_name.name, metric_dist.value + Join Filter: texteq(concat('cpu', textin(int4out(metric_dist.id))), metric_name.name) + -> Index Only Scan using metric_name_name on public.metric_name + Output: metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) +(15 rows) + +-- distinct on, order by, limit 1, with subquery +select distinct on (id) + id, ts, value +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by id, ts, value +limit 1 +; + id | ts | value +----+----------------------------------+------------------ + 1 | Tue Feb 01 15:03:56.048 2022 PST | 36.1639380455017 +(1 row) + +explain (costs off, verbose) +select distinct on (id) + id, ts, value +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by id, ts, value +limit 1 +; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: metric_dist.id, metric_dist.ts, metric_dist.value + -> Unique + Output: metric_dist.id, metric_dist.ts, metric_dist.value + -> Sort + Output: metric_dist.id, metric_dist.ts, metric_dist.value + Sort Key: metric_dist.id, metric_dist.ts, metric_dist.value + -> Nested Loop + Output: metric_dist.id, metric_dist.ts, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.ts, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT ts, id, value FROM 
public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(18 rows) + +-- distinct on, order by, limit 1, with explicit join +select distinct on (name) + name, ts, value +from metric_dist join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by name, ts, value +limit 1 +; + name | ts | value +------+----------------------------------+------------------ + cpu1 | Tue Feb 01 15:03:56.048 2022 PST | 36.1639380455017 +(1 row) + +explain (costs off, verbose) +select distinct on (name) + name, ts, value +from metric_dist join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by name, ts, value +limit 1 +; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: metric_name.name, metric_dist.ts, metric_dist.value + -> Unique + Output: metric_name.name, metric_dist.ts, metric_dist.value + -> Sort + Output: metric_name.name, metric_dist.ts, metric_dist.value + Sort Key: metric_name.name COLLATE "C", metric_dist.ts, metric_dist.value + -> Nested Loop + Output: metric_name.name, metric_dist.ts, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.ts, metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT ts, id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(18 rows) + +-- If there are a lot of rows chosen from the local table, the parameterized +-- nested loop might download the entire dist table or even more than that (in +-- the case of a non-equi-join or duplicate join keys). +-- Check that the parameterized plan is not chosen in this case.
+explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist +join metric_name using (id) +where ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Sort + Output: metric_name.name, metric_dist.value + Sort Key: metric_name.name COLLATE "C" + -> Hash Join + Output: metric_name.name, metric_dist.value + Inner Unique: true + Hash Cond: (metric_dist.id = metric_name.id) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) + -> Hash + Output: metric_name.name, metric_name.id + -> Seq Scan on public.metric_name + Output: metric_name.name, metric_name.id +(19 rows) + +-- An interesting special case is when the remote SQL has a parameter, but it is +-- the result of an initplan. It's not "parameterized" in the join sense, because +-- there is only one param value. This is the most efficient plan for querying a +-- small number of ids. +explain (costs off, verbose) +select id, max(value) +from metric_dist +where id = any((select array_agg(id) from metric_name where name like 'cpu%')::int[]) + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_dist.id, max(metric_dist.value) + Group Key: metric_dist.id + InitPlan 1 (returns $0) + -> Aggregate + Output: array_agg(metric_name.id) + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND ((id = ANY ($1::integer[]))) ORDER BY id ASC NULLS LAST +(15 rows) + +-- Multiple joins. Test both EC and non-EC (texteq) join in one query. 
+create table metric_location(id int, location text); +insert into metric_location values (1, 'Yerevan'), (3, 'Dilijan'), (7, 'Stepanakert'); +analyze metric_location; +select id, max(value) +from metric_dist natural join metric_location natural join metric_name +where name like 'cpu%' and texteq(location, 'Yerevan') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +; + id | max +----+------------------ + 1 | 49.9941974878311 +(1 row) + +explain (costs off, verbose) +select id, max(value) +from metric_dist natural join metric_location natural join metric_name +where name like 'cpu%' and texteq(location, 'Yerevan') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_dist.id, max(metric_dist.value) + Group Key: metric_dist.id + -> Sort + Output: metric_dist.id, metric_dist.value + Sort Key: metric_dist.id + -> Nested Loop + Output: metric_dist.id, metric_dist.value + -> Nested Loop + Output: metric_location.id, metric_name.id + Inner Unique: true + Join Filter: (metric_location.id = metric_name.id) + -> Seq Scan on public.metric_location + Output: metric_location.id, metric_location.location + Filter: texteq(metric_location.location, 'Yerevan'::text) + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(24 rows) + +-- Multiple joins on different variables. Use a table instead of a CTE for saner +-- stats. 
+create table max_value_times as +select distinct on (id) id, ts from metric_dist +where ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by id, value desc +; +analyze max_value_times; +explain (costs off, verbose) +select id, value +from metric_dist natural join max_value_times natural join metric_name +where name like 'cpu%' +order by 1 +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: metric_dist.id, metric_dist.value + -> Nested Loop + Output: max_value_times.ts, max_value_times.id, metric_name.id + Join Filter: (max_value_times.id = metric_name.id) + -> Index Scan using metric_name_id on public.metric_name + Output: metric_name.id, metric_name.name + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Seq Scan on public.max_value_times + Output: max_value_times.id, max_value_times.ts + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value, metric_dist.ts + Data node: data_node_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk, _dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT ts, id, value FROM 
public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) AND (($1::timestamp with time zone = ts)) AND (($2::integer = id)) +(15 rows) + +-- Two distributed hypertables, each joined to the reference table and all joined together. +-- The query finds the percentage of time points where one metric is higher than +-- another, and is also tweaked not to use initplans. Requires a hash join. +explain (analyze, verbose, costs off, timing off, summary off) +select count(*) filter (where m1.value > m2.value) / count(*) +from metric_dist m1 +join metric_dist m2 using (ts) +where m1.id in (select id from metric_name where name = 'cpu1') + and m2.id in (select id from metric_name where name = 'cpu3') +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + Output: (count(*) FILTER (WHERE (m1.value > m2.value)) / count(*)) + -> Hash Join (actual rows=91 loops=1) + Output: m1.value, m2.value + Hash Cond: (m1.ts = m2.ts) + -> Nested Loop (actual rows=50037 loops=1) + Output: m1.value, m1.ts + -> Index Scan using metric_name_name on public.metric_name (actual rows=1 loops=1) + Output: metric_name.id, metric_name.name + Index Cond: (metric_name.name = 'cpu1'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist m1 (actual rows=50037 loops=1) + Output: m1.value, m1.ts, m1.id + Data node: data_node_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk,
_dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT ts, id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) AND (($1::integer = id)) + -> Hash (actual rows=50101 loops=1) + Output: m2.value, m2.ts + Buckets: 65536 (originally 1024) Batches: 1 (originally 1) + -> Nested Loop (actual rows=50101 loops=1) + Output: m2.value, m2.ts + -> Index Scan using metric_name_name on public.metric_name metric_name_1 (actual rows=1 loops=1) + Output: metric_name_1.id, metric_name_1.name + Index Cond: (metric_name_1.name = 'cpu3'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist m2 (actual rows=50101 loops=1) + Output: m2.value, m2.ts, m2.id + Data node: data_node_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk, _dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT ts, id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) AND (($1::integer = id)) +(30 rows) + +-- Should prefer reference table join pushdown to all other kinds of plans, +-- basically always. Note that we don't actually replicate the ref table here, +-- so EXPLAIN ANALYZE would fail. 
+set role :ROLE_CLUSTER_SUPERUSER; +alter foreign data wrapper timescaledb_fdw options (add reference_tables 'metric_name'); +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name using (id) +where name like 'cpu%' +group by name +order by name +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Result + Output: metric_name.name, metric_dist.value + -> Custom Scan (DataNodeScan) + Output: metric_dist.value, metric_name.name + Data node: data_node_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk, _dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT r57.value, r2.name FROM (public.metric_dist r57 INNER JOIN public.metric_name r2 ON (((r57.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r57, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) ORDER 
BY r2.name ASC NULLS LAST +(10 rows) + +set timescaledb.enable_parameterized_data_node_scan to false; +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name using (id) +where name like 'cpu%' +group by name +order by name +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Result + Output: metric_name.name, metric_dist.value + -> Custom Scan (DataNodeScan) + Output: metric_dist.value, metric_name.name + Data node: data_node_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk, _dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT r57.value, r2.name FROM (public.metric_dist r57 INNER JOIN public.metric_name r2 ON (((r57.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r57, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) ORDER BY r2.name ASC NULLS 
LAST +(10 rows) + diff --git a/tsl/test/expected/dist_param-15.out b/tsl/test/expected/dist_param-15.out new file mode 100644 index 00000000000..7809fd80f6f --- /dev/null +++ b/tsl/test/expected/dist_param-15.out @@ -0,0 +1,815 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Test parameterized data node scan. +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +\set DN_DBNAME_1 :TEST_DBNAME _1 +-- pg_regress doesn't drop these databases for repeated invocation such as in +-- the flaky check. +set client_min_messages to ERROR; +drop database if exists :"DN_DBNAME_1" with (force); +select 1 from add_data_node('data_node_1', host => 'localhost', + database => :'DN_DBNAME_1'); + ?column? +---------- + 1 +(1 row) + +grant usage on foreign server data_node_1 to public; +grant create on schema public to :ROLE_1; +set role :ROLE_1; +reset client_min_messages; +\set ON_ERROR_STOP 0 +-- helper function: float -> pseudorandom float [0..1]. +create or replace function mix(x float4) returns float4 as $$ select ((hashfloat4(x) / (pow(2., 31) - 1) + 1) / 2)::float4 $$ language sql; +-- distributed hypertable +create table metric_dist(ts timestamptz, id int, value float); +select create_distributed_hypertable('metric_dist', 'ts', 'id'); +WARNING: only one data node was assigned to the hypertable +NOTICE: adding not-null constraint to column "ts" + create_distributed_hypertable +------------------------------- + (1,public,metric_dist,t) +(1 row) + +insert into metric_dist + select '2022-02-02 02:02:02+03'::timestamptz + interval '1 year' * mix(x), + mix(x + 1.) * 20, + mix(x + 2.) * 50 + from generate_series(1, 1000000) x(x) +; +analyze metric_dist; +select count(*) from show_chunks('metric_dist'); + count +------- + 53 +(1 row) + +-- dictionary +create table metric_name(id int, name text collate "C", + constraint metric_name_name unique (name), + constraint metric_name_id primary key (id)); +insert into metric_name values (1, 'cpu1'), (3, 'cpu3'), (7, 'cpu7'); +insert into metric_name select x, 'other' || x + from generate_series(1000, 10000) x +; +analyze metric_name; +-- for predictable plans +set enable_hashagg to off; +set enable_material to off; +set enable_mergejoin to off; +-- not present on PG 12 +\set ECHO errors +-- Subquery + IN +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +explain (costs off, verbose) +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_dist.id, max(metric_dist.value), count(*) + Group Key: metric_dist.id + -> Sort + Output: metric_dist.id, metric_dist.value + Sort Key: metric_dist.id + -> Nested Loop + 
Output: metric_dist.id, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(17 rows) + +-- Check that the GUC to disable these plans works. Our cost model is very +-- heuristic and may be often wrong, so there needs to be a way to disable them. +set timescaledb.enable_parameterized_data_node_scan to false; +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +explain (costs off, verbose) +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + GroupAggregate + Output: metric_dist.id, max(metric_dist.value), count(*) + Group Key: metric_dist.id + -> Sort + Output: metric_dist.id, metric_dist.value + Sort Key: metric_dist.id + -> Hash Join + Output: metric_dist.id, metric_dist.value + Inner Unique: true + Hash Cond: (metric_dist.id = metric_name.id) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) ORDER BY id ASC NULLS LAST + -> Hash + Output: metric_name.id + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) +(21 rows) + +reset timescaledb.enable_parameterized_data_node_scan; +-- All fetcher types with join +set timescaledb.remote_data_fetcher = 'copy'; +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; +ERROR: cannot use COPY fetcher because the plan is parameterized 
+set timescaledb.remote_data_fetcher = 'cursor'; +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +set timescaledb.remote_data_fetcher = 'prepared'; +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +-- All fetcher types with initplan +set timescaledb.remote_data_fetcher = 'copy'; +select id, max(value), count(*) +from metric_dist +where id = any((select array_agg(id) from metric_name where name like 'cpu%')::int[]) + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; +ERROR: cannot use COPY fetcher because the plan is parameterized +set timescaledb.remote_data_fetcher = 'cursor'; +select id, max(value), count(*) +from metric_dist +where id = any((select array_agg(id) from metric_name where name like 'cpu%')::int[]) + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +set timescaledb.remote_data_fetcher = 'prepared'; +select id, max(value), count(*) +from metric_dist +where id = any((select array_agg(id) from metric_name where name like 'cpu%')::int[]) + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + id | max | count +----+------------------+------- + 1 | 49.9941974878311 | 4174 + 3 | 49.9958902597427 | 4119 + 7 | 49.9881327152252 | 4316 +(3 rows) + +-- Should prefer prepared statement data fetcher for these queries. 
+set timescaledb.remote_data_fetcher = 'auto'; +explain (analyze, verbose, costs off, timing off, summary off) +select id, max(value), count(*) +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=3 loops=1) + Output: metric_dist.id, max(metric_dist.value), count(*) + Group Key: metric_dist.id + -> Sort (actual rows=12609 loops=1) + Output: metric_dist.id, metric_dist.value + Sort Key: metric_dist.id + Sort Method: quicksort + -> Nested Loop (actual rows=12609 loops=1) + Output: metric_dist.id, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name (actual rows=3 loops=1) + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist (actual rows=4203 loops=3) + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Fetcher Type: Prepared statement + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(19 rows) + +-- Should reset the prepared cache mode after using the prepared statement fetcher. 
+call distributed_exec('create or replace procedure assert_auto_plan_cache_mode() as $$ begin assert (select setting from pg_settings where name = ''plan_cache_mode'') = ''auto''; end; $$ language plpgsql;'); +call distributed_exec('call assert_auto_plan_cache_mode();'); +-- Shippable EC join +select name, max(value), count(*) +from metric_dist join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + name | max | count +------+------------------+------- + cpu1 | 49.9941974878311 | 4174 + cpu3 | 49.9958902597427 | 4119 + cpu7 | 49.9881327152252 | 4316 +(3 rows) + +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Nested Loop + Output: metric_name.name, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(14 rows) + +-- Shipping still might make sense if the local table is outer. 
+explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist right join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Nested Loop + Output: metric_name.name, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(14 rows) + +-- Shipping doesn't make sense if the distributed table is outer. +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist +left join (select * from metric_name where name like 'cpu%') t using (id) +where ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Sort + Output: metric_name.name, metric_dist.value + Sort Key: metric_name.name COLLATE "C" + -> Hash Left Join + Output: metric_name.name, metric_dist.value + Inner Unique: true + Hash Cond: (metric_dist.id = metric_name.id) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) + -> Hash + Output: metric_name.name, metric_name.id + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.name, metric_name.id + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) +(21 rows) + +-- Non-shippable EC join +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name on name = concat('cpu', metric_dist.id) +where metric_name.name like 'cpu%' + and ts 
between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Sort + Output: metric_name.name, metric_dist.value + Sort Key: metric_name.name COLLATE "C" + -> Hash Join + Output: metric_name.name, metric_dist.value + Inner Unique: true + Hash Cond: ((concat('cpu', metric_dist.id))::text = metric_name.name) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) + -> Hash + Output: metric_name.name + -> Index Only Scan using metric_name_name on public.metric_name + Output: metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) +(21 rows) + +-- Shippable non-EC join. The weird condition is to only use immutable functions +-- that can be shipped to the remote node. `id::text` does CoerceViaIO which is +-- not generally shippable. And `int4out` returns cstring, not text, that's why +-- the `textin` is needed. 
+select name, max(value), count(*) +from metric_dist join metric_name + on texteq('cpu' || textin(int4out(metric_dist.id)), name) +where metric_name.name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + name | max | count +------+------------------+------- + cpu1 | 49.9941974878311 | 4174 + cpu3 | 49.9958902597427 | 4119 + cpu7 | 49.9881327152252 | 4316 +(3 rows) + +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name + on texteq('cpu' || textin(int4out(metric_dist.id)), name) +where metric_name.name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Nested Loop + Output: metric_name.name, metric_dist.value + -> Index Only Scan using metric_name_name on public.metric_name + Output: metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (texteq(('cpu'::text || textin(int4out(id))), $1::text)) +(14 rows) + +-- Non-shippable non-EC join. 
+explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name + on texteq(concat('cpu', textin(int4out(metric_dist.id))), name) +where metric_name.name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Nested Loop + Output: metric_name.name, metric_dist.value + Join Filter: texteq(concat('cpu', textin(int4out(metric_dist.id))), metric_name.name) + -> Index Only Scan using metric_name_name on public.metric_name + Output: metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) +(15 rows) + +-- distinct on, order by, limit 1, with subquery +select distinct on (id) + id, ts, value +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by id, ts, value +limit 1 +; + id | ts | value +----+----------------------------------+------------------ + 1 | Tue Feb 01 15:03:56.048 2022 PST | 36.1639380455017 +(1 row) + +explain (costs off, verbose) +select distinct on (id) + id, ts, value +from metric_dist +where id in (select id from metric_name where name like 'cpu%') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by id, ts, value +limit 1 +; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: metric_dist.id, metric_dist.ts, metric_dist.value + -> Unique + Output: metric_dist.id, metric_dist.ts, metric_dist.value + -> Sort + Output: metric_dist.id, metric_dist.ts, metric_dist.value + Sort Key: metric_dist.id, metric_dist.ts, metric_dist.value + -> Nested Loop + Output: metric_dist.id, metric_dist.ts, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.ts, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT ts, id, value FROM 
public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(18 rows) + +-- distinct on, order by, limit 1, with explicit join +select distinct on (name) + name, ts, value +from metric_dist join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by name, ts, value +limit 1 +; + name | ts | value +------+----------------------------------+------------------ + cpu1 | Tue Feb 01 15:03:56.048 2022 PST | 36.1639380455017 +(1 row) + +explain (costs off, verbose) +select distinct on (name) + name, ts, value +from metric_dist join metric_name using (id) +where name like 'cpu%' + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by name, ts, value +limit 1 +; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: metric_name.name, metric_dist.ts, metric_dist.value + -> Unique + Output: metric_name.name, metric_dist.ts, metric_dist.value + -> Sort + Output: metric_name.name, metric_dist.ts, metric_dist.value + Sort Key: metric_name.name COLLATE "C", metric_dist.ts, metric_dist.value + -> Nested Loop + Output: metric_name.name, metric_dist.ts, metric_dist.value + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.ts, metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT ts, id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(18 rows) + +-- If there are a lot of rows chosen from the local table, the parameterized +-- nested loop might download the entire dist table or even more than that (in +-- the case of a non-equi-join or duplicate join keys). +-- Check that the parameterized plan is not chosen in this case.
+explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist +join metric_name using (id) +where ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by name +order by name +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Sort + Output: metric_name.name, metric_dist.value + Sort Key: metric_name.name COLLATE "C" + -> Hash Join + Output: metric_name.name, metric_dist.value + Inner Unique: true + Hash Cond: (metric_dist.id = metric_name.id) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.value, metric_dist.id + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) + -> Hash + Output: metric_name.name, metric_name.id + -> Seq Scan on public.metric_name + Output: metric_name.name, metric_name.id +(19 rows) + +-- An interesting special case is when the remote SQL has a parameter, but it is +-- the result of an initplan. It's not "parameterized" in the join sense, because +-- there is only one param value. This is the most efficient plan for querying a +-- small number of ids. +explain (costs off, verbose) +select id, max(value) +from metric_dist +where id = any((select array_agg(id) from metric_name where name like 'cpu%')::int[]) + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +order by id +; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_dist.id, max(metric_dist.value) + Group Key: metric_dist.id + InitPlan 1 (returns $0) + -> Aggregate + Output: array_agg(metric_name.id) + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND ((id = ANY ($1::integer[]))) ORDER BY id ASC NULLS LAST +(15 rows) + +-- Multiple joins. Test both EC and non-EC (texteq) join in one query. 
+create table metric_location(id int, location text); +insert into metric_location values (1, 'Yerevan'), (3, 'Dilijan'), (7, 'Stepanakert'); +analyze metric_location; +select id, max(value) +from metric_dist natural join metric_location natural join metric_name +where name like 'cpu%' and texteq(location, 'Yerevan') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +; + id | max +----+------------------ + 1 | 49.9941974878311 +(1 row) + +explain (costs off, verbose) +select id, max(value) +from metric_dist natural join metric_location natural join metric_name +where name like 'cpu%' and texteq(location, 'Yerevan') + and ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +group by id +; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_dist.id, max(metric_dist.value) + Group Key: metric_dist.id + -> Sort + Output: metric_dist.id, metric_dist.value + Sort Key: metric_dist.id + -> Nested Loop + Output: metric_dist.id, metric_dist.value + -> Nested Loop + Output: metric_location.id, metric_name.id + Inner Unique: true + Join Filter: (metric_location.id = metric_name.id) + -> Seq Scan on public.metric_location + Output: metric_location.id, metric_location.location + Filter: texteq(metric_location.location, 'Yerevan'::text) + -> Index Scan using metric_name_name on public.metric_name + Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value + Data node: data_node_1 + Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk + Remote SQL: SELECT id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) AND (($1::integer = id)) +(24 rows) + +-- Multiple joins on different variables. Use a table instead of a CTE for saner +-- stats. 
+create table max_value_times as +select distinct on (id) id, ts from metric_dist +where ts between '2022-02-02 02:02:02+03' and '2022-03-03 02:02:02+03' +order by id, value desc +; +analyze max_value_times; +explain (costs off, verbose) +select id, value +from metric_dist natural join max_value_times natural join metric_name +where name like 'cpu%' +order by 1 +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: metric_dist.id, metric_dist.value + -> Nested Loop + Output: max_value_times.ts, max_value_times.id, metric_name.id + Join Filter: (max_value_times.id = metric_name.id) + -> Index Scan using metric_name_id on public.metric_name + Output: metric_name.id, metric_name.name + Filter: (metric_name.name ~~ 'cpu%'::text) + -> Seq Scan on public.max_value_times + Output: max_value_times.id, max_value_times.ts + -> Custom Scan (DataNodeScan) on public.metric_dist + Output: metric_dist.id, metric_dist.value, metric_dist.ts + Data node: data_node_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk, _dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT ts, id, value FROM 
public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) AND (($1::timestamp with time zone = ts)) AND (($2::integer = id)) +(15 rows) + +-- Two distributed hypertables, each joined to reference and all joined together. +-- The query finds the percentage of time points where one metric is higher than +-- another, and is also tweaked not to use initplans. Requires hash join. +explain (analyze, verbose, costs off, timing off, summary off) +select count(*) filter (where m1.value > m2.value) / count(*) +from metric_dist m1 +join metric_dist m2 using (ts) +where m1.id in (select id from metric_name where name = 'cpu1') + and m2.id in (select id from metric_name where name = 'cpu3') +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + Output: (count(*) FILTER (WHERE (m1.value > m2.value)) / count(*)) + -> Hash Join (actual rows=91 loops=1) + Output: m1.value, m2.value + Hash Cond: (m1.ts = m2.ts) + -> Nested Loop (actual rows=50037 loops=1) + Output: m1.value, m1.ts + -> Index Scan using metric_name_name on public.metric_name (actual rows=1 loops=1) + Output: metric_name.id, metric_name.name + Index Cond: (metric_name.name = 'cpu1'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist m1 (actual rows=50037 loops=1) + Output: m1.value, m1.ts, m1.id + Data node: data_node_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk,
_dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT ts, id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) AND (($1::integer = id)) + -> Hash (actual rows=50101 loops=1) + Output: m2.value, m2.ts + Buckets: 65536 (originally 1024) Batches: 1 (originally 1) + -> Nested Loop (actual rows=50101 loops=1) + Output: m2.value, m2.ts + -> Index Scan using metric_name_name on public.metric_name metric_name_1 (actual rows=1 loops=1) + Output: metric_name_1.id, metric_name_1.name + Index Cond: (metric_name_1.name = 'cpu3'::text) + -> Custom Scan (DataNodeScan) on public.metric_dist m2 (actual rows=50101 loops=1) + Output: m2.value, m2.ts, m2.id + Data node: data_node_1 + Fetcher Type: Cursor + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk, _dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT ts, id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) AND (($1::integer = id)) +(30 rows) + +-- Should prefer reference table join pushdown to all other kinds of plans, +-- basically always. Note that we don't actually replicate the ref table here, +-- so EXPLAIN ANALYZE would fail. 
+set role :ROLE_CLUSTER_SUPERUSER; +alter foreign data wrapper timescaledb_fdw options (add reference_tables 'metric_name'); +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name using (id) +where name like 'cpu%' +group by name +order by name +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Result + Output: metric_name.name, metric_dist.value + -> Custom Scan (DataNodeScan) + Output: metric_dist.value, metric_name.name + Data node: data_node_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk, _dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT r57.value, r2.name FROM (public.metric_dist r57 INNER JOIN public.metric_name r2 ON (((r57.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r57, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) ORDER 
BY r2.name ASC NULLS LAST +(10 rows) + +set timescaledb.enable_parameterized_data_node_scan to false; +explain (costs off, verbose) +select name, max(value), count(*) +from metric_dist join metric_name using (id) +where name like 'cpu%' +group by name +order by name +; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: metric_name.name, max(metric_dist.value), count(*) + Group Key: metric_name.name + -> Result + Output: metric_name.name, metric_dist.value + -> Custom Scan (DataNodeScan) + Output: metric_dist.value, metric_name.name + Data node: data_node_1 + Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_2_chunk, _dist_hyper_1_3_chunk, _dist_hyper_1_4_chunk, _dist_hyper_1_5_chunk, _dist_hyper_1_6_chunk, _dist_hyper_1_7_chunk, _dist_hyper_1_8_chunk, _dist_hyper_1_9_chunk, _dist_hyper_1_10_chunk, _dist_hyper_1_11_chunk, _dist_hyper_1_12_chunk, _dist_hyper_1_13_chunk, _dist_hyper_1_14_chunk, _dist_hyper_1_15_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_17_chunk, _dist_hyper_1_18_chunk, _dist_hyper_1_19_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_21_chunk, _dist_hyper_1_22_chunk, _dist_hyper_1_23_chunk, _dist_hyper_1_24_chunk, _dist_hyper_1_25_chunk, _dist_hyper_1_26_chunk, _dist_hyper_1_27_chunk, _dist_hyper_1_28_chunk, _dist_hyper_1_29_chunk, _dist_hyper_1_30_chunk, _dist_hyper_1_31_chunk, _dist_hyper_1_32_chunk, _dist_hyper_1_33_chunk, _dist_hyper_1_34_chunk, _dist_hyper_1_35_chunk, _dist_hyper_1_36_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_38_chunk, _dist_hyper_1_39_chunk, _dist_hyper_1_40_chunk, _dist_hyper_1_41_chunk, _dist_hyper_1_42_chunk, _dist_hyper_1_43_chunk, _dist_hyper_1_44_chunk, _dist_hyper_1_45_chunk, _dist_hyper_1_46_chunk, _dist_hyper_1_47_chunk, _dist_hyper_1_48_chunk, _dist_hyper_1_49_chunk, _dist_hyper_1_50_chunk, _dist_hyper_1_51_chunk, _dist_hyper_1_52_chunk, _dist_hyper_1_53_chunk + Remote SQL: SELECT r57.value, r2.name FROM (public.metric_dist r57 INNER JOIN public.metric_name r2 ON (((r57.id = r2.id)) AND ((r2.name ~~ 'cpu%'::text)))) WHERE _timescaledb_functions.chunks_in(r57, ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]) ORDER BY r2.name ASC NULLS 
LAST +(10 rows) + diff --git a/tsl/test/expected/dist_param.out b/tsl/test/expected/dist_param-16.out similarity index 100% rename from tsl/test/expected/dist_param.out rename to tsl/test/expected/dist_param-16.out diff --git a/tsl/test/sql/.gitignore b/tsl/test/sql/.gitignore index 7c80876e95a..df3e5dcb090 100644 --- a/tsl/test/sql/.gitignore +++ b/tsl/test/sql/.gitignore @@ -1,5 +1,6 @@ /*.pgbinary /cagg_bgw-*.sql +/cagg_bgw_dist_ht-*.sql /cagg_ddl-*.sql /cagg_ddl_dist_ht-*.sql /cagg_errors_deprecated-*.sql @@ -16,6 +17,7 @@ /continuous_aggs_deprecated-*.sql /deparse-*.sql /dist_grant-*.sql +/dist_param-*.sql /dist_query-*.sql /dist_hypertable-*.sql /dist_partial_agg-*.sql diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index 9a623ffee2d..88cbace8f89 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -31,7 +31,7 @@ set(TEST_FILES size_utils_tsl.sql) if(ENABLE_MULTINODE_TESTS) - list(APPEND TEST_FILES dist_param.sql dist_views.sql) + list(APPEND TEST_FILES dist_views.sql) endif() if(USE_TELEMETRY) @@ -92,7 +92,6 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) list( APPEND TEST_FILES - cagg_bgw_dist_ht.sql cagg_migrate_dist_ht.sql cagg_on_cagg_dist_ht.sql cagg_on_cagg_joins_dist_ht.sql @@ -171,6 +170,10 @@ set(TEST_TEMPLATES transparent_decompression_ordered_index.sql.in merge_append_partially_compressed.sql.in) +if(ENABLE_MULTINODE_TESTS) + list(APPEND TEST_TEMPLATES dist_param.sql.in) +endif() + # This test runs only with PG version >= 14 if((${PG_VERSION_MAJOR} GREATER_EQUAL "14")) set(TEST_FILES_ON_VERSION_GE_14 modify_exclusion.sql.in) @@ -194,6 +197,7 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) list( APPEND TEST_TEMPLATES + cagg_bgw_dist_ht.sql.in cagg_ddl_dist_ht.sql.in cagg_invalidation_dist_ht.sql.in dist_hypertable.sql.in diff --git a/tsl/test/sql/cagg_bgw_dist_ht.sql b/tsl/test/sql/cagg_bgw_dist_ht.sql.in similarity index 100% rename from tsl/test/sql/cagg_bgw_dist_ht.sql rename to tsl/test/sql/cagg_bgw_dist_ht.sql.in diff --git a/tsl/test/sql/dist_param.sql b/tsl/test/sql/dist_param.sql.in similarity index 100% rename from tsl/test/sql/dist_param.sql rename to tsl/test/sql/dist_param.sql.in
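
A note on the reference-table join pushdown exercised at the end of dist_param: the deparsed join references public.metric_name directly on the data node, and the test intentionally never replicates that table there, which is why its comment warns that EXPLAIN ANALYZE would fail for those plans. As a rough sketch only (assuming the single data_node_1 setup from the test; this is an illustration, not part of the patch's test setup), the dictionary table could be materialized on the data node by hand with the same distributed_exec procedure the test already uses for assert_auto_plan_cache_mode:

-- hypothetical: mirror the access node's definition of the dictionary table
call distributed_exec($$ create table metric_name(id int, name text collate "C",
    constraint metric_name_name unique (name),
    constraint metric_name_id primary key (id)) $$);
-- hypothetical: copy the same rows the test inserts on the access node
call distributed_exec($$ insert into metric_name values (1, 'cpu1'), (3, 'cpu3'), (7, 'cpu7') $$);
call distributed_exec($$ insert into metric_name select x, 'other' || x from generate_series(1000, 10000) x $$);

With the table present on data_node_1, the pushed-down remote SQL can resolve public.metric_name, so the EXPLAIN (ANALYZE) variants of the reference-table join queries above should then be runnable as well.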