diff --git a/.github/workflows/linux-32bit-build-and-test.yaml b/.github/workflows/linux-32bit-build-and-test.yaml
index 49deb158057..93d22fab814 100644
--- a/.github/workflows/linux-32bit-build-and-test.yaml
+++ b/.github/workflows/linux-32bit-build-and-test.yaml
@@ -42,17 +42,14 @@ jobs:
           CXX: clang++-14
           DEBIAN_FRONTEND: noninteractive
           IGNORES: "append-* debug_notice transparent_decompression-*
-            transparent_decompress_chunk-* pg_dump
+            transparent_decompress_chunk-* pg_dump partialize_finalize
             dist_move_chunk dist_param dist_insert remote_txn telemetry"
-          SKIPS: chunk_adaptive histogram_test
+          SKIPS: chunk_adaptive histogram_test-*
       strategy:
         fail-fast: false
         matrix:
           pg: ${{ fromJson(needs.config.outputs.pg_latest) }}
           build_type: [ Debug ]
-        include:
-          - pg: ${{ fromJson(needs.config.outputs.pg15_latest) }}
-            ignores_version: partialize_finalize
 
     steps:
@@ -135,8 +132,8 @@ jobs:
           set -o pipefail
           export LANG=C.UTF-8
           # PostgreSQL cannot be run as root. So, switch to postgres user.
-          sudo -u postgres make -k -C build installcheck IGNORES="${IGNORES} \
-            ${{ matrix.ignores_version }}" SKIPS="${SKIPS}" | tee installcheck.log
+          sudo -u postgres make -k -C build installcheck IGNORES="${IGNORES}" \
+            SKIPS="${SKIPS}" | tee installcheck.log
 
       - name: Show regression diffs
         if: always()
diff --git a/.github/workflows/windows-build-and-test.yaml b/.github/workflows/windows-build-and-test.yaml
index 3a5be1a1b38..53aad8c61a8 100644
--- a/.github/workflows/windows-build-and-test.yaml
+++ b/.github/workflows/windows-build-and-test.yaml
@@ -56,7 +56,7 @@ jobs:
         os: [ windows-2022 ]
         build_type: ${{ fromJson(needs.config.outputs.build_type) }}
         ignores: ["chunk_adaptive metadata telemetry"]
-        tsl_ignores: ["compression_algos remote_connection telemetry_stats-13 telemetry_stats-14 dist_move_chunk dist_param dist_insert dist_backup dist_cagg"]
+        tsl_ignores: ["compression_algos remote_connection telemetry_stats dist_move_chunk dist_param dist_insert dist_backup dist_cagg partialize_finalize"]
         tsl_skips: ["bgw_db_scheduler bgw_db_scheduler_fixed cagg_ddl_dist_ht data_fetcher dist_compression dist_remote_error remote_txn"]
         pg_config: ["-cfsync=off -cstatement_timeout=60s"]
 
@@ -72,7 +72,6 @@ jobs:
           # pkg_version: ${{ fromJson(needs.config.outputs.pg15_latest) }}
           pkg_version: 15.0.1 # hardcoded due to issues with PG15.1 on chocolatey
           tsl_skips_version: dist_partial_agg-15 dist_grant-15
-          tsl_ignores_version: partialize_finalize
     env:
       # PostgreSQL configuration
       PGPORT: 55432
diff --git a/sql/updates/pre-update.sql b/sql/updates/pre-update.sql
index 018556fe546..899b2801aea 100644
--- a/sql/updates/pre-update.sql
+++ b/sql/updates/pre-update.sql
@@ -51,3 +51,35 @@ WHERE
 )
 ;
 
+-- ERROR if trying to update the extension on PG16 using Multi-Node
+DO $$
+DECLARE
+    data_nodes TEXT;
+    dist_hypertables TEXT;
+BEGIN
+    IF current_setting('server_version_num')::int >= 160000 THEN
+        SELECT string_agg(format('%I.%I', hypertable_schema, hypertable_name), ', ')
+        INTO dist_hypertables
+        FROM timescaledb_information.hypertables
+        WHERE is_distributed IS TRUE;
+
+        IF dist_hypertables IS NOT NULL THEN
+            RAISE USING
+                ERRCODE = 'feature_not_supported',
+                MESSAGE = 'cannot upgrade because multi-node is not supported on PostgreSQL >= 16',
+                DETAIL = 'The following distributed hypertables should be migrated to regular: '||dist_hypertables;
+        END IF;
+
+        SELECT string_agg(format('%I', node_name), ', ')
+        INTO data_nodes
+        FROM timescaledb_information.data_nodes;
+
+        IF data_nodes IS NOT NULL THEN
+            RAISE USING
+                ERRCODE = 'feature_not_supported',
+                MESSAGE = 'cannot upgrade because multi-node is not supported on PostgreSQL >= 16',
+                DETAIL = 'The following data nodes should be removed: '||data_nodes;
+        END IF;
+    END IF;
+END $$;
+
diff --git a/src/hypertable.c b/src/hypertable.c
index 08ab3b87940..cbea0bbbc6d 100644
--- a/src/hypertable.c
+++ b/src/hypertable.c
@@ -68,6 +68,7 @@
 #include "debug_assert.h"
 #include "osm_callbacks.h"
 #include "error_utils.h"
+#include "compat/compat.h"
 
 Oid
 ts_rel_get_owner(Oid relid)
@@ -2046,6 +2047,17 @@ ts_hypertable_create(PG_FUNCTION_ARGS)
 Datum
 ts_hypertable_distributed_create(PG_FUNCTION_ARGS)
 {
+#if PG16_GE
+    ereport(ERROR,
+            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+             errmsg("distributed hypertable is not supported"),
+             errdetail("Multi-node is not supported anymore on PostgreSQL >= 16.")));
+#else
+    ereport(WARNING,
+            (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE),
+             errmsg("distributed hypertable is deprecated"),
+             errdetail("Multi-node is deprecated and will be removed in future releases.")));
+#endif
     return ts_hypertable_create_time_prev(fcinfo, true);
 }
diff --git a/test/expected/cluster-16.out b/test/expected/cluster-16.out
index 630262f2a38..9c1305ffc55 100644
--- a/test/expected/cluster-16.out
+++ b/test/expected/cluster-16.out
@@ -60,12 +60,12 @@ WHERE indisclustered = true ORDER BY 1;
 -- Reorder all tables (although will only be our test table)
 CLUSTER VERBOSE;
+INFO: clustering "public.cluster_test" using sequential scan and sort
+INFO: "public.cluster_test": found 0 removable, 0 nonremovable row versions in 0 pages
 INFO: clustering "_timescaledb_internal._hyper_1_1_chunk" using sequential scan and sort
 INFO: "_timescaledb_internal._hyper_1_1_chunk": found 0 removable, 1 nonremovable row versions in 1 pages
 INFO: clustering "_timescaledb_internal._hyper_1_2_chunk" using sequential scan and sort
 INFO: "_timescaledb_internal._hyper_1_2_chunk": found 0 removable, 1 nonremovable row versions in 1 pages
-INFO: clustering "public.cluster_test" using sequential scan and sort
-INFO: "public.cluster_test": found 0 removable, 0 nonremovable row versions in 0 pages
 INFO: clustering "_timescaledb_internal._hyper_1_3_chunk" using sequential scan and sort
 INFO: "_timescaledb_internal._hyper_1_3_chunk": found 0 removable, 1 nonremovable row versions in 1 pages
 -- Change the clustered index
diff --git a/test/expected/histogram_test.out b/test/expected/histogram_test-13.out
similarity index 98%
rename from test/expected/histogram_test.out
rename to test/expected/histogram_test-13.out
index 3f933edef19..97d58601180 100644
--- a/test/expected/histogram_test.out
+++ b/test/expected/histogram_test-13.out
@@ -120,7 +120,7 @@ INSERT INTO weather VALUES
 ('2023-02-10 08:58:59.999999+00','city1',10.3),
 ('2023-03-23 06:12:02.73765+00 ','city1', 9.7),
 ('2023-03-23 06:12:06.990998+00','city1',11.7);
--- This will currently generate an error.
+-- This will currently generate an error on PG15 and prior versions
 \set ON_ERROR_STOP 0
 SELECT histogram(temperature, -1.79769e+308, 1.79769e+308,10) FROM weather GROUP BY city;
 ERROR: index -2147483648 from "width_bucket" out of range
diff --git a/test/expected/histogram_test-14.out b/test/expected/histogram_test-14.out
new file mode 100644
index 00000000000..97d58601180
--- /dev/null
+++ b/test/expected/histogram_test-14.out
@@ -0,0 +1,127 @@
+-- This file and its contents are licensed under the Apache License 2.0.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-APACHE for a copy of the license.
+-- table 1 +CREATE TABLE "hitest1"(key real, val varchar(40)); +-- insertions +INSERT INTO "hitest1" VALUES(0, 'hi'); +INSERT INTO "hitest1" VALUES(1, 'sup'); +INSERT INTO "hitest1" VALUES(2, 'hello'); +INSERT INTO "hitest1" VALUES(3, 'yo'); +INSERT INTO "hitest1" VALUES(4, 'howdy'); +INSERT INTO "hitest1" VALUES(5, 'hola'); +INSERT INTO "hitest1" VALUES(6, 'ya'); +INSERT INTO "hitest1" VALUES(1, 'sup'); +INSERT INTO "hitest1" VALUES(2, 'hello'); +INSERT INTO "hitest1" VALUES(1, 'sup'); +-- table 2 +CREATE TABLE "hitest2"(name varchar(30), score integer, qualify boolean); +-- insertions +INSERT INTO "hitest2" VALUES('Tom', 6, TRUE); +INSERT INTO "hitest2" VALUES('Mary', 4, FALSE); +INSERT INTO "hitest2" VALUES('Jaq', 3, FALSE); +INSERT INTO "hitest2" VALUES('Jane', 10, TRUE); +-- standard 2 bucket +SELECT histogram(key, 0, 9, 2) FROM hitest1; + histogram +----------- + {0,8,2,0} +(1 row) + +-- standard multi-bucket +SELECT histogram(key, 0, 9, 5) FROM hitest1; + histogram +----------------- + {0,4,3,2,1,0,0} +(1 row) + +-- standard 3 bucket +SELECT val, histogram(key, 0, 7, 3) FROM hitest1 GROUP BY val ORDER BY val; + val | histogram +-------+------------- + hello | {0,2,0,0,0} + hi | {0,1,0,0,0} + hola | {0,0,0,1,0} + howdy | {0,0,1,0,0} + sup | {0,3,0,0,0} + ya | {0,0,0,1,0} + yo | {0,0,1,0,0} +(7 rows) + +-- standard element beneath lb +SELECT histogram(key, 1, 7, 3) FROM hitest1; + histogram +------------- + {1,5,2,2,0} +(1 row) + +-- standard element above ub +SELECT histogram(key, 0, 3, 3) FROM hitest1; + histogram +------------- + {0,1,3,2,4} +(1 row) + +-- standard element beneath and above lb and ub, respectively +SELECT histogram(key, 1, 3, 2) FROM hitest1; + histogram +----------- + {1,3,2,4} +(1 row) + +-- standard 1 bucket +SELECT histogram(key, 1, 3, 1) FROM hitest1; + histogram +----------- + {1,5,4} +(1 row) + +-- standard 2 bucket +SELECT qualify, histogram(score, 0, 10, 2) FROM hitest2 GROUP BY qualify ORDER BY qualify; + qualify | histogram +---------+----------- + f | {0,2,0,0} + t | {0,0,1,1} +(2 rows) + +-- standard multi-bucket +SELECT qualify, histogram(score, 0, 10, 5) FROM hitest2 GROUP BY qualify ORDER BY qualify; + qualify | histogram +---------+----------------- + f | {0,0,1,1,0,0,0} + t | {0,0,0,0,1,0,1} +(2 rows) + +-- check number of buckets is constant +\set ON_ERROR_STOP 0 +select histogram(i,10,90,case when i=1 then 1 else 1000000 end) FROM generate_series(1,100) i; +ERROR: number of buckets must not change between calls +\set ON_ERROR_STOP 1 +CREATE TABLE weather ( + time TIMESTAMPTZ NOT NULL, + city TEXT, + temperature FLOAT, + PRIMARY KEY(time, city) +); +-- There is a bug in width_bucket() causing a NaN as a result, so we +-- check that it is not causing a crash in histogram(). 
+SELECT * FROM create_hypertable('weather', 'time', 'city', 3); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 1 | public | weather | t +(1 row) + +INSERT INTO weather VALUES + ('2023-02-10 09:16:51.133584+00','city1',10.4), + ('2023-02-10 11:16:51.611618+00','city1',10.3), + ('2023-02-10 06:58:59.999999+00','city1',10.3), + ('2023-02-10 01:58:59.999999+00','city1',10.3), + ('2023-02-09 01:58:59.999999+00','city1',10.3), + ('2023-02-10 08:58:59.999999+00','city1',10.3), + ('2023-03-23 06:12:02.73765+00 ','city1', 9.7), + ('2023-03-23 06:12:06.990998+00','city1',11.7); +-- This will currently generate an error on PG15 and prior versions +\set ON_ERROR_STOP 0 +SELECT histogram(temperature, -1.79769e+308, 1.79769e+308,10) FROM weather GROUP BY city; +ERROR: index -2147483648 from "width_bucket" out of range +\set ON_ERROR_STOP 1 diff --git a/test/expected/histogram_test-15.out b/test/expected/histogram_test-15.out new file mode 100644 index 00000000000..97d58601180 --- /dev/null +++ b/test/expected/histogram_test-15.out @@ -0,0 +1,127 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- table 1 +CREATE TABLE "hitest1"(key real, val varchar(40)); +-- insertions +INSERT INTO "hitest1" VALUES(0, 'hi'); +INSERT INTO "hitest1" VALUES(1, 'sup'); +INSERT INTO "hitest1" VALUES(2, 'hello'); +INSERT INTO "hitest1" VALUES(3, 'yo'); +INSERT INTO "hitest1" VALUES(4, 'howdy'); +INSERT INTO "hitest1" VALUES(5, 'hola'); +INSERT INTO "hitest1" VALUES(6, 'ya'); +INSERT INTO "hitest1" VALUES(1, 'sup'); +INSERT INTO "hitest1" VALUES(2, 'hello'); +INSERT INTO "hitest1" VALUES(1, 'sup'); +-- table 2 +CREATE TABLE "hitest2"(name varchar(30), score integer, qualify boolean); +-- insertions +INSERT INTO "hitest2" VALUES('Tom', 6, TRUE); +INSERT INTO "hitest2" VALUES('Mary', 4, FALSE); +INSERT INTO "hitest2" VALUES('Jaq', 3, FALSE); +INSERT INTO "hitest2" VALUES('Jane', 10, TRUE); +-- standard 2 bucket +SELECT histogram(key, 0, 9, 2) FROM hitest1; + histogram +----------- + {0,8,2,0} +(1 row) + +-- standard multi-bucket +SELECT histogram(key, 0, 9, 5) FROM hitest1; + histogram +----------------- + {0,4,3,2,1,0,0} +(1 row) + +-- standard 3 bucket +SELECT val, histogram(key, 0, 7, 3) FROM hitest1 GROUP BY val ORDER BY val; + val | histogram +-------+------------- + hello | {0,2,0,0,0} + hi | {0,1,0,0,0} + hola | {0,0,0,1,0} + howdy | {0,0,1,0,0} + sup | {0,3,0,0,0} + ya | {0,0,0,1,0} + yo | {0,0,1,0,0} +(7 rows) + +-- standard element beneath lb +SELECT histogram(key, 1, 7, 3) FROM hitest1; + histogram +------------- + {1,5,2,2,0} +(1 row) + +-- standard element above ub +SELECT histogram(key, 0, 3, 3) FROM hitest1; + histogram +------------- + {0,1,3,2,4} +(1 row) + +-- standard element beneath and above lb and ub, respectively +SELECT histogram(key, 1, 3, 2) FROM hitest1; + histogram +----------- + {1,3,2,4} +(1 row) + +-- standard 1 bucket +SELECT histogram(key, 1, 3, 1) FROM hitest1; + histogram +----------- + {1,5,4} +(1 row) + +-- standard 2 bucket +SELECT qualify, histogram(score, 0, 10, 2) FROM hitest2 GROUP BY qualify ORDER BY qualify; + qualify | histogram +---------+----------- + f | {0,2,0,0} + t | {0,0,1,1} +(2 rows) + +-- standard multi-bucket +SELECT qualify, histogram(score, 0, 10, 5) FROM hitest2 GROUP BY qualify ORDER BY qualify; + qualify | histogram +---------+----------------- + f | {0,0,1,1,0,0,0} + t 
| {0,0,0,0,1,0,1} +(2 rows) + +-- check number of buckets is constant +\set ON_ERROR_STOP 0 +select histogram(i,10,90,case when i=1 then 1 else 1000000 end) FROM generate_series(1,100) i; +ERROR: number of buckets must not change between calls +\set ON_ERROR_STOP 1 +CREATE TABLE weather ( + time TIMESTAMPTZ NOT NULL, + city TEXT, + temperature FLOAT, + PRIMARY KEY(time, city) +); +-- There is a bug in width_bucket() causing a NaN as a result, so we +-- check that it is not causing a crash in histogram(). +SELECT * FROM create_hypertable('weather', 'time', 'city', 3); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 1 | public | weather | t +(1 row) + +INSERT INTO weather VALUES + ('2023-02-10 09:16:51.133584+00','city1',10.4), + ('2023-02-10 11:16:51.611618+00','city1',10.3), + ('2023-02-10 06:58:59.999999+00','city1',10.3), + ('2023-02-10 01:58:59.999999+00','city1',10.3), + ('2023-02-09 01:58:59.999999+00','city1',10.3), + ('2023-02-10 08:58:59.999999+00','city1',10.3), + ('2023-03-23 06:12:02.73765+00 ','city1', 9.7), + ('2023-03-23 06:12:06.990998+00','city1',11.7); +-- This will currently generate an error on PG15 and prior versions +\set ON_ERROR_STOP 0 +SELECT histogram(temperature, -1.79769e+308, 1.79769e+308,10) FROM weather GROUP BY city; +ERROR: index -2147483648 from "width_bucket" out of range +\set ON_ERROR_STOP 1 diff --git a/test/expected/histogram_test-16.out b/test/expected/histogram_test-16.out new file mode 100644 index 00000000000..0239ca4920c --- /dev/null +++ b/test/expected/histogram_test-16.out @@ -0,0 +1,131 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+-- table 1 +CREATE TABLE "hitest1"(key real, val varchar(40)); +-- insertions +INSERT INTO "hitest1" VALUES(0, 'hi'); +INSERT INTO "hitest1" VALUES(1, 'sup'); +INSERT INTO "hitest1" VALUES(2, 'hello'); +INSERT INTO "hitest1" VALUES(3, 'yo'); +INSERT INTO "hitest1" VALUES(4, 'howdy'); +INSERT INTO "hitest1" VALUES(5, 'hola'); +INSERT INTO "hitest1" VALUES(6, 'ya'); +INSERT INTO "hitest1" VALUES(1, 'sup'); +INSERT INTO "hitest1" VALUES(2, 'hello'); +INSERT INTO "hitest1" VALUES(1, 'sup'); +-- table 2 +CREATE TABLE "hitest2"(name varchar(30), score integer, qualify boolean); +-- insertions +INSERT INTO "hitest2" VALUES('Tom', 6, TRUE); +INSERT INTO "hitest2" VALUES('Mary', 4, FALSE); +INSERT INTO "hitest2" VALUES('Jaq', 3, FALSE); +INSERT INTO "hitest2" VALUES('Jane', 10, TRUE); +-- standard 2 bucket +SELECT histogram(key, 0, 9, 2) FROM hitest1; + histogram +----------- + {0,8,2,0} +(1 row) + +-- standard multi-bucket +SELECT histogram(key, 0, 9, 5) FROM hitest1; + histogram +----------------- + {0,4,3,2,1,0,0} +(1 row) + +-- standard 3 bucket +SELECT val, histogram(key, 0, 7, 3) FROM hitest1 GROUP BY val ORDER BY val; + val | histogram +-------+------------- + hello | {0,2,0,0,0} + hi | {0,1,0,0,0} + hola | {0,0,0,1,0} + howdy | {0,0,1,0,0} + sup | {0,3,0,0,0} + ya | {0,0,0,1,0} + yo | {0,0,1,0,0} +(7 rows) + +-- standard element beneath lb +SELECT histogram(key, 1, 7, 3) FROM hitest1; + histogram +------------- + {1,5,2,2,0} +(1 row) + +-- standard element above ub +SELECT histogram(key, 0, 3, 3) FROM hitest1; + histogram +------------- + {0,1,3,2,4} +(1 row) + +-- standard element beneath and above lb and ub, respectively +SELECT histogram(key, 1, 3, 2) FROM hitest1; + histogram +----------- + {1,3,2,4} +(1 row) + +-- standard 1 bucket +SELECT histogram(key, 1, 3, 1) FROM hitest1; + histogram +----------- + {1,5,4} +(1 row) + +-- standard 2 bucket +SELECT qualify, histogram(score, 0, 10, 2) FROM hitest2 GROUP BY qualify ORDER BY qualify; + qualify | histogram +---------+----------- + f | {0,2,0,0} + t | {0,0,1,1} +(2 rows) + +-- standard multi-bucket +SELECT qualify, histogram(score, 0, 10, 5) FROM hitest2 GROUP BY qualify ORDER BY qualify; + qualify | histogram +---------+----------------- + f | {0,0,1,1,0,0,0} + t | {0,0,0,0,1,0,1} +(2 rows) + +-- check number of buckets is constant +\set ON_ERROR_STOP 0 +select histogram(i,10,90,case when i=1 then 1 else 1000000 end) FROM generate_series(1,100) i; +ERROR: number of buckets must not change between calls +\set ON_ERROR_STOP 1 +CREATE TABLE weather ( + time TIMESTAMPTZ NOT NULL, + city TEXT, + temperature FLOAT, + PRIMARY KEY(time, city) +); +-- There is a bug in width_bucket() causing a NaN as a result, so we +-- check that it is not causing a crash in histogram(). 
+SELECT * FROM create_hypertable('weather', 'time', 'city', 3); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 1 | public | weather | t +(1 row) + +INSERT INTO weather VALUES + ('2023-02-10 09:16:51.133584+00','city1',10.4), + ('2023-02-10 11:16:51.611618+00','city1',10.3), + ('2023-02-10 06:58:59.999999+00','city1',10.3), + ('2023-02-10 01:58:59.999999+00','city1',10.3), + ('2023-02-09 01:58:59.999999+00','city1',10.3), + ('2023-02-10 08:58:59.999999+00','city1',10.3), + ('2023-03-23 06:12:02.73765+00 ','city1', 9.7), + ('2023-03-23 06:12:06.990998+00','city1',11.7); +-- This will currently generate an error on PG15 and prior versions +\set ON_ERROR_STOP 0 +SELECT histogram(temperature, -1.79769e+308, 1.79769e+308,10) FROM weather GROUP BY city; + histogram +--------------------------- + {0,0,0,0,0,0,8,0,0,0,0,0} +(1 row) + +\set ON_ERROR_STOP 1 diff --git a/test/expected/insert.out b/test/expected/insert-13.out similarity index 100% rename from test/expected/insert.out rename to test/expected/insert-13.out diff --git a/test/expected/insert-14.out b/test/expected/insert-14.out new file mode 100644 index 00000000000..b4daca98c34 --- /dev/null +++ b/test/expected/insert-14.out @@ -0,0 +1,677 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\ir include/insert_two_partitions.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +CREATE TABLE PUBLIC."two_Partitions" ( + "timeCustom" BIGINT NOT NULL, + device_id TEXT NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL, + series_bool BOOLEAN NULL +); +CREATE INDEX ON PUBLIC."two_Partitions" (device_id, "timeCustom" DESC NULLS LAST) WHERE device_id IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_0) WHERE series_0 IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_1) WHERE series_1 IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_2) WHERE series_2 IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_bool) WHERE series_bool IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, device_id); +SELECT * FROM create_hypertable('"public"."two_Partitions"'::regclass, 'timeCustom'::name, 'device_id'::name, associated_schema_name=>'_timescaledb_internal'::text, number_partitions => 2, chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); + hypertable_id | schema_name | table_name | created +---------------+-------------+----------------+--------- + 1 | public | two_Partitions | t +(1 row) + +\set QUIET off +BEGIN; +BEGIN +\COPY public."two_Partitions" FROM 'data/ds1_dev1_1.tsv' NULL AS ''; +COPY 7 +COMMIT; +COMMIT +INSERT INTO public."two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES +(1257987600000000000, 'dev1', 1.5, 1), +(1257987600000000000, 'dev1', 1.5, 2), +(1257894000000000000, 'dev2', 1.5, 1), +(1257894002000000000, 'dev1', 2.5, 3); +INSERT 0 4 +INSERT INTO "two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES +(1257894000000000000, 'dev2', 1.5, 2); +INSERT 0 1 +\set QUIET on +SELECT * FROM 
test.show_columnsp('_timescaledb_internal.%_hyper%'); + Relation | Kind | Column | Column type | NotNull +------------------------------------------------------------------------------------+------+-------------+------------------+--------- + _timescaledb_internal._hyper_1_1_chunk | r | timeCustom | bigint | t + _timescaledb_internal._hyper_1_1_chunk | r | device_id | text | t + _timescaledb_internal._hyper_1_1_chunk | r | series_0 | double precision | f + _timescaledb_internal._hyper_1_1_chunk | r | series_1 | double precision | f + _timescaledb_internal._hyper_1_1_chunk | r | series_2 | double precision | f + _timescaledb_internal._hyper_1_1_chunk | r | series_bool | boolean | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f + _timescaledb_internal._hyper_1_2_chunk | r | timeCustom | bigint | t + _timescaledb_internal._hyper_1_2_chunk | r | device_id | text | t + _timescaledb_internal._hyper_1_2_chunk | r | series_0 | double precision | f + _timescaledb_internal._hyper_1_2_chunk | r | series_1 | double precision | f + _timescaledb_internal._hyper_1_2_chunk | r | series_2 | double precision | f + _timescaledb_internal._hyper_1_2_chunk | r | series_bool | boolean | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f + 
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f + _timescaledb_internal._hyper_1_3_chunk | r | timeCustom | bigint | t + _timescaledb_internal._hyper_1_3_chunk | r | device_id | text | t + _timescaledb_internal._hyper_1_3_chunk | r | series_0 | double precision | f + _timescaledb_internal._hyper_1_3_chunk | r | series_1 | double precision | f + _timescaledb_internal._hyper_1_3_chunk | r | series_2 | double precision | f + _timescaledb_internal._hyper_1_3_chunk | r | series_bool | boolean | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f + _timescaledb_internal._hyper_1_4_chunk | r | timeCustom | bigint | t + _timescaledb_internal._hyper_1_4_chunk | r | device_id | text | t + _timescaledb_internal._hyper_1_4_chunk | r | series_0 | double precision | f + _timescaledb_internal._hyper_1_4_chunk | r | series_1 | double precision | f + _timescaledb_internal._hyper_1_4_chunk | r | series_2 | double precision | f + _timescaledb_internal._hyper_1_4_chunk | r | series_bool | boolean | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f + 
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f +(76 rows) + +SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%'); + Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace +----------------------------------------+------------------------------------------------------------------------------------+--------------------------+------+--------+---------+-----------+------------ + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f | 
+ _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f | +(28 rows) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+------------------+---------------------+---------+--------+----------- + 1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | | f | 0 | f + 2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | | f | 0 | f + 3 | 1 | _timescaledb_internal | _hyper_1_3_chunk | | f | 0 | f + 4 | 1 | _timescaledb_internal | _hyper_1_4_chunk | | f | 0 | f +(4 rows) + +SELECT * FROM "two_Partitions" ORDER BY "timeCustom", device_id, series_0, series_1; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +---------------------+-----------+----------+----------+----------+------------- + 1257894000000000000 | dev1 | 1.5 | 1 | 2 | t + 1257894000000000000 | dev1 | 1.5 | 2 | | + 1257894000000000000 | dev2 | 1.5 | 1 | | + 1257894000000000000 | dev2 | 1.5 | 2 | | + 1257894000000001000 | dev1 | 2.5 | 3 | | + 1257894001000000000 | dev1 | 3.5 | 4 | | + 1257894002000000000 | dev1 | 2.5 | 3 | | + 1257894002000000000 | dev1 | 5.5 | 6 | | t + 1257894002000000000 | dev1 | 5.5 | 7 | | f + 
1257897600000000000 | dev1 | 4.5 | 5 | | f + 1257987600000000000 | dev1 | 1.5 | 1 | | + 1257987600000000000 | dev1 | 1.5 | 2 | | +(12 rows) + +SELECT * FROM ONLY "two_Partitions"; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +------------+-----------+----------+----------+----------+------------- +(0 rows) + +CREATE TABLE error_test(time timestamp, temp float8, device text NOT NULL); +SELECT create_hypertable('error_test', 'time', 'device', 2); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------- + (2,public,error_test,t) +(1 row) + +\set QUIET off +INSERT INTO error_test VALUES ('Mon Mar 20 09:18:20.1 2017', 21.3, 'dev1'); +INSERT 0 1 +\set ON_ERROR_STOP 0 +-- generate insert error +INSERT INTO error_test VALUES ('Mon Mar 20 09:18:22.3 2017', 21.1, NULL); +ERROR: null value in column "device" of relation "_hyper_2_6_chunk" violates not-null constraint +\set ON_ERROR_STOP 1 +INSERT INTO error_test VALUES ('Mon Mar 20 09:18:25.7 2017', 22.4, 'dev2'); +INSERT 0 1 +\set QUIET on +SELECT * FROM error_test; + time | temp | device +----------------------------+------+-------- + Mon Mar 20 09:18:20.1 2017 | 21.3 | dev1 + Mon Mar 20 09:18:25.7 2017 | 22.4 | dev2 +(2 rows) + +--test character(9) partition keys since there were issues with padding causing partitioning errors +CREATE TABLE tick_character ( + symbol character(9) NOT NULL, + mid REAL NOT NULL, + spread REAL NOT NULL, + time TIMESTAMPTZ NOT NULL +); +SELECT create_hypertable ('tick_character', 'time', 'symbol', 2); + create_hypertable +----------------------------- + (3,public,tick_character,t) +(1 row) + +INSERT INTO tick_character ( symbol, mid, spread, time ) VALUES ( 'GBPJPY', 142.639000, 5.80, 'Mon Mar 20 09:18:22.3 2017') RETURNING time, symbol, mid; + time | symbol | mid +--------------------------------+-----------+--------- + Mon Mar 20 09:18:22.3 2017 PDT | GBPJPY | 142.639 +(1 row) + +SELECT * FROM tick_character; + symbol | mid | spread | time +-----------+---------+--------+-------------------------------- + GBPJPY | 142.639 | 5.8 | Mon Mar 20 09:18:22.3 2017 PDT +(1 row) + +CREATE TABLE date_col_test(time date, temp float8, device text NOT NULL); +SELECT create_hypertable('date_col_test', 'time', 'device', 1000, chunk_time_interval => INTERVAL '1 Day'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (4,public,date_col_test,t) +(1 row) + +INSERT INTO date_col_test +VALUES ('2001-02-01', 98, 'dev1'), +('2001-03-02', 98, 'dev1'); +SELECT * FROM date_col_test WHERE time > '2001-01-01'; + time | temp | device +------------+------+-------- + 02-01-2001 | 98 | dev1 + 03-02-2001 | 98 | dev1 +(2 rows) + +-- Out-of-order insertion regression test. 
+-- this used to trip an assert in subspace_store.c checking that +-- max_open_chunks_per_insert was obeyed +set timescaledb.max_open_chunks_per_insert=1; +CREATE TABLE chunk_assert_fail(i bigint, j bigint); +SELECT create_hypertable('chunk_assert_fail', 'i', 'j', 1000, chunk_time_interval=>1); +NOTICE: adding not-null constraint to column "i" + create_hypertable +-------------------------------- + (5,public,chunk_assert_fail,t) +(1 row) + +insert into chunk_assert_fail values (1, 1), (1, 2), (2,1); +select * from chunk_assert_fail; + i | j +---+--- + 1 | 1 + 1 | 2 + 2 | 1 +(3 rows) + +CREATE TABLE one_space_test(time timestamp, temp float8, device text NOT NULL); +SELECT create_hypertable('one_space_test', 'time', 'device', 1); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------------- + (6,public,one_space_test,t) +(1 row) + +INSERT INTO one_space_test VALUES +('2001-01-01 01:01:01', 1.0, 'device'), +('2002-01-01 01:02:01', 1.0, 'device'); +SELECT * FROM one_space_test; + time | temp | device +--------------------------+------+-------- + Mon Jan 01 01:01:01 2001 | 1 | device + Tue Jan 01 01:02:01 2002 | 1 | device +(2 rows) + +--CTE & EXPLAIN ANALYZE TESTS +WITH insert_cte as ( + INSERT INTO one_space_test VALUES + ('2001-01-01 01:02:01', 1.0, 'device') + RETURNING *) +SELECT * FROM insert_cte; + time | temp | device +--------------------------+------+-------- + Mon Jan 01 01:02:01 2001 | 1 | device +(1 row) + +EXPLAIN (analyze, costs off, timing off) --can't turn summary off in 9.6 so instead grep it away at end. +WITH insert_cte as ( + INSERT INTO one_space_test VALUES + ('2001-01-01 01:03:01', 1.0, 'device') + ) +SELECT 1 \g | grep -v "Planning" | grep -v "Execution" + QUERY PLAN +------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + CTE insert_cte + -> Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Insert on one_space_test (actual rows=0 loops=1) + -> Custom Scan (ChunkDispatch) (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) +(8 rows) + +-- INSERTs can exclude chunks based on constraints +EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail; + QUERY PLAN +------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on chunk_assert_fail + -> Custom Scan (ChunkDispatch) + -> Append + -> Seq Scan on _hyper_5_11_chunk + -> Seq Scan on _hyper_5_12_chunk + -> Seq Scan on _hyper_5_13_chunk +(7 rows) + +EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i < 1; + QUERY PLAN +-------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on chunk_assert_fail + -> Custom Scan (ChunkDispatch) + -> Result + One-Time Filter: false +(5 rows) + +EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i = 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on chunk_assert_fail + -> Custom Scan (ChunkDispatch) + -> Append + -> Index Scan using _hyper_5_11_chunk_chunk_assert_fail_i_idx on _hyper_5_11_chunk + Index Cond: (i = 1) + -> Index Scan using _hyper_5_12_chunk_chunk_assert_fail_i_idx on _hyper_5_12_chunk + Index Cond: (i = 1) +(8 rows) + +EXPLAIN (costs off) INSERT INTO chunk_assert_fail 
SELECT i, j FROM chunk_assert_fail WHERE i > 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on chunk_assert_fail + -> Custom Scan (ChunkDispatch) + -> Index Scan using _hyper_5_13_chunk_chunk_assert_fail_i_idx on _hyper_5_13_chunk + Index Cond: (i > 1) +(5 rows) + +INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i > 1; +EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time < 'infinity' LIMIT 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on one_space_test + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + -> Index Scan using _hyper_6_14_chunk_one_space_test_time_idx on _hyper_6_14_chunk + Index Cond: ("time" < 'infinity'::timestamp without time zone) + -> Index Scan using _hyper_6_15_chunk_one_space_test_time_idx on _hyper_6_15_chunk + Index Cond: ("time" < 'infinity'::timestamp without time zone) +(9 rows) + +EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time >= 'infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on one_space_test + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time <= '-infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on one_space_test + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time > '-infinity' LIMIT 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on one_space_test + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + -> Index Scan using _hyper_6_14_chunk_one_space_test_time_idx on _hyper_6_14_chunk + Index Cond: ("time" > '-infinity'::timestamp without time zone) + -> Index Scan using _hyper_6_15_chunk_one_space_test_time_idx on _hyper_6_15_chunk + Index Cond: ("time" > '-infinity'::timestamp without time zone) +(9 rows) + +INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time < 'infinity' LIMIT 1; +INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time >= 'infinity' LIMIT 1; +INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time <= '-infinity' LIMIT 1; +INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time > '-infinity' LIMIT 1; +CREATE TABLE timestamp_inf(time TIMESTAMP); +SELECT create_hypertable('timestamp_inf', 'time'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (7,public,timestamp_inf,t) +(1 row) + +INSERT INTO timestamp_inf VALUES ('2018/01/02'), ('2019/01/02'); +EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf + WHERE time < 'infinity' LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on timestamp_inf + -> Custom Scan 
(ChunkDispatch) + -> Limit + -> Append + -> Index Only Scan using _hyper_7_16_chunk_timestamp_inf_time_idx on _hyper_7_16_chunk + Index Cond: ("time" < 'infinity'::timestamp without time zone) + -> Index Only Scan using _hyper_7_17_chunk_timestamp_inf_time_idx on _hyper_7_17_chunk + Index Cond: ("time" < 'infinity'::timestamp without time zone) +(9 rows) + +EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf + WHERE time >= 'infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on timestamp_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf + WHERE time <= '-infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on timestamp_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf + WHERE time > '-infinity' LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on timestamp_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + -> Index Only Scan using _hyper_7_16_chunk_timestamp_inf_time_idx on _hyper_7_16_chunk + Index Cond: ("time" > '-infinity'::timestamp without time zone) + -> Index Only Scan using _hyper_7_17_chunk_timestamp_inf_time_idx on _hyper_7_17_chunk + Index Cond: ("time" > '-infinity'::timestamp without time zone) +(9 rows) + +CREATE TABLE date_inf(time DATE); +SELECT create_hypertable('date_inf', 'time'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------- + (8,public,date_inf,t) +(1 row) + +INSERT INTO date_inf VALUES ('2018/01/02'), ('2019/01/02'); +EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf + WHERE time < 'infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on date_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + -> Index Only Scan using _hyper_8_18_chunk_date_inf_time_idx on _hyper_8_18_chunk + Index Cond: ("time" < 'infinity'::date) + -> Index Only Scan using _hyper_8_19_chunk_date_inf_time_idx on _hyper_8_19_chunk + Index Cond: ("time" < 'infinity'::date) +(9 rows) + +EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf + WHERE time >= 'infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on date_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf + WHERE time <= '-infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on date_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf + WHERE time > '-infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on date_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + 
-> Index Only Scan using _hyper_8_18_chunk_date_inf_time_idx on _hyper_8_18_chunk + Index Cond: ("time" > '-infinity'::date) + -> Index Only Scan using _hyper_8_19_chunk_date_inf_time_idx on _hyper_8_19_chunk + Index Cond: ("time" > '-infinity'::date) +(9 rows) + +-- test INSERT with cached plans / plpgsql functions +-- https://github.com/timescale/timescaledb/issues/1809 +CREATE TABLE status_table(a int, b int, last_ts timestamptz, UNIQUE(a,b)); +CREATE TABLE metrics(time timestamptz NOT NULL, value float); +CREATE TABLE metrics2(time timestamptz NOT NULL, value float); +SELECT (create_hypertable(t,'time')).table_name FROM (VALUES ('metrics'),('metrics2')) v(t); + table_name +------------ + metrics + metrics2 +(2 rows) + +INSERT INTO metrics VALUES ('2000-01-01',random()), ('2000-02-01',random()), ('2000-03-01',random()); +CREATE OR REPLACE FUNCTION insert_test() RETURNS VOID LANGUAGE plpgsql AS +$$ + DECLARE + r RECORD; + BEGIN + FOR r IN + SELECT * FROM metrics + LOOP + WITH foo AS ( + INSERT INTO metrics2 SELECT * FROM metrics RETURNING * + ) + INSERT INTO status_table (a,b, last_ts) + VALUES (1,1, now()) + ON CONFLICT (a,b) DO UPDATE SET last_ts=(SELECT max(time) FROM metrics); + END LOOP; + END; +$$; +SELECT insert_test(), insert_test(), insert_test(); + insert_test | insert_test | insert_test +-------------+-------------+------------- + | | +(1 row) + +-- test Postgres crashes on INSERT ... SELECT ... WHERE NOT EXISTS with empty table +-- https://github.com/timescale/timescaledb/issues/1883 +CREATE TABLE readings ( + toe TIMESTAMPTZ NOT NULL, + sensor_id INT NOT NULL, + value INT NOT NULL +); +SELECT create_hypertable( + 'readings', + 'toe', + chunk_time_interval => interval '1 day', + if_not_exists => TRUE, + migrate_data => TRUE +); + create_hypertable +------------------------ + (11,public,readings,t) +(1 row) + +EXPLAIN (costs off) +INSERT INTO readings +SELECT '2020-05-09 10:34:35.296288+00', 1, 0 +WHERE NOT EXISTS ( + SELECT 1 + FROM readings + WHERE sensor_id = 1 + AND toe = '2020-05-09 10:34:35.296288+00' +); + QUERY PLAN +----------------------------------------------------- + Custom Scan (HypertableModify) + InitPlan 1 (returns $0) + -> Result + One-Time Filter: false + -> Insert on readings + -> Result + One-Time Filter: (NOT $0) + -> Custom Scan (ChunkDispatch) + -> Result + One-Time Filter: (NOT $0) +(10 rows) + +INSERT INTO readings +SELECT '2020-05-09 10:34:35.296288+00', 1, 0 +WHERE NOT EXISTS ( + SELECT 1 + FROM readings + WHERE sensor_id = 1 + AND toe = '2020-05-09 10:34:35.296288+00' +); +DROP TABLE readings; +CREATE TABLE sample_table ( + sequence INTEGER NOT NULL, + time TIMESTAMP WITHOUT TIME ZONE NOT NULL, + value NUMERIC NOT NULL, + UNIQUE (sequence, time) +); +SELECT * FROM create_hypertable('sample_table', 'time', + chunk_time_interval => INTERVAL '1 day'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 12 | public | sample_table | t +(1 row) + +INSERT INTO sample_table (sequence,time,value) VALUES + (7, generate_series(TIMESTAMP '2019-08-01', TIMESTAMP '2019-08-10', INTERVAL '10 minutes'), ROUND(RANDOM()*10)::int); +\set ON_ERROR_STOP 0 +INSERT INTO sample_table (sequence,time,value) VALUES + (7, generate_series(TIMESTAMP '2019-07-21', TIMESTAMP '2019-08-01', INTERVAL '10 minutes'), ROUND(RANDOM()*10)::int); +ERROR: duplicate key value violates unique constraint 
"27_1_sample_table_sequence_time_key" +\set ON_ERROR_STOP 1 +INSERT INTO sample_table (sequence,time,value) VALUES + (7,generate_series(TIMESTAMP '2019-01-01', TIMESTAMP '2019-07-01', '10 minutes'), ROUND(RANDOM()*10)::int); +DROP TABLE sample_table; +-- test on conflict clause on columns with default value +-- issue #3037 +CREATE TABLE i3037(time timestamptz PRIMARY KEY); +SELECT create_hypertable('i3037','time'); + create_hypertable +--------------------- + (13,public,i3037,t) +(1 row) + +ALTER TABLE i3037 ADD COLUMN value float DEFAULT 0; +INSERT INTO i3037 VALUES ('2000-01-01'); +INSERT INTO i3037 VALUES ('2000-01-01') ON CONFLICT(time) DO UPDATE SET value = EXCLUDED.value; diff --git a/test/expected/insert-15.out b/test/expected/insert-15.out new file mode 100644 index 00000000000..b4daca98c34 --- /dev/null +++ b/test/expected/insert-15.out @@ -0,0 +1,677 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\ir include/insert_two_partitions.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +CREATE TABLE PUBLIC."two_Partitions" ( + "timeCustom" BIGINT NOT NULL, + device_id TEXT NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL, + series_bool BOOLEAN NULL +); +CREATE INDEX ON PUBLIC."two_Partitions" (device_id, "timeCustom" DESC NULLS LAST) WHERE device_id IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_0) WHERE series_0 IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_1) WHERE series_1 IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_2) WHERE series_2 IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_bool) WHERE series_bool IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, device_id); +SELECT * FROM create_hypertable('"public"."two_Partitions"'::regclass, 'timeCustom'::name, 'device_id'::name, associated_schema_name=>'_timescaledb_internal'::text, number_partitions => 2, chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); + hypertable_id | schema_name | table_name | created +---------------+-------------+----------------+--------- + 1 | public | two_Partitions | t +(1 row) + +\set QUIET off +BEGIN; +BEGIN +\COPY public."two_Partitions" FROM 'data/ds1_dev1_1.tsv' NULL AS ''; +COPY 7 +COMMIT; +COMMIT +INSERT INTO public."two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES +(1257987600000000000, 'dev1', 1.5, 1), +(1257987600000000000, 'dev1', 1.5, 2), +(1257894000000000000, 'dev2', 1.5, 1), +(1257894002000000000, 'dev1', 2.5, 3); +INSERT 0 4 +INSERT INTO "two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES +(1257894000000000000, 'dev2', 1.5, 2); +INSERT 0 1 +\set QUIET on +SELECT * FROM test.show_columnsp('_timescaledb_internal.%_hyper%'); + Relation | Kind | Column | Column type | NotNull +------------------------------------------------------------------------------------+------+-------------+------------------+--------- + _timescaledb_internal._hyper_1_1_chunk | r | timeCustom | bigint | t + _timescaledb_internal._hyper_1_1_chunk | r | device_id | text | t + _timescaledb_internal._hyper_1_1_chunk | r 
| series_0 | double precision | f + _timescaledb_internal._hyper_1_1_chunk | r | series_1 | double precision | f + _timescaledb_internal._hyper_1_1_chunk | r | series_2 | double precision | f + _timescaledb_internal._hyper_1_1_chunk | r | series_bool | boolean | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f + _timescaledb_internal._hyper_1_2_chunk | r | timeCustom | bigint | t + _timescaledb_internal._hyper_1_2_chunk | r | device_id | text | t + _timescaledb_internal._hyper_1_2_chunk | r | series_0 | double precision | f + _timescaledb_internal._hyper_1_2_chunk | r | series_1 | double precision | f + _timescaledb_internal._hyper_1_2_chunk | r | series_2 | double precision | f + _timescaledb_internal._hyper_1_2_chunk | r | series_bool | boolean | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f + 
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f + _timescaledb_internal._hyper_1_3_chunk | r | timeCustom | bigint | t + _timescaledb_internal._hyper_1_3_chunk | r | device_id | text | t + _timescaledb_internal._hyper_1_3_chunk | r | series_0 | double precision | f + _timescaledb_internal._hyper_1_3_chunk | r | series_1 | double precision | f + _timescaledb_internal._hyper_1_3_chunk | r | series_2 | double precision | f + _timescaledb_internal._hyper_1_3_chunk | r | series_bool | boolean | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f + _timescaledb_internal._hyper_1_4_chunk | r | timeCustom | bigint | t + _timescaledb_internal._hyper_1_4_chunk | r | device_id | text | t + _timescaledb_internal._hyper_1_4_chunk | r | series_0 | double precision | f + _timescaledb_internal._hyper_1_4_chunk | r | series_1 | double precision | f + _timescaledb_internal._hyper_1_4_chunk | r | series_2 | double precision | f + _timescaledb_internal._hyper_1_4_chunk | r | series_bool | boolean | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f + 
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f +(76 rows) + +SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%'); + Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace +----------------------------------------+------------------------------------------------------------------------------------+--------------------------+------+--------+---------+-----------+------------ + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" | 
{timeCustom,series_1} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f | +(28 rows) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+------------------+---------------------+---------+--------+----------- + 1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | | f | 0 | f + 2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | | f | 0 | f + 3 | 1 | _timescaledb_internal | _hyper_1_3_chunk | | f | 0 | f + 4 | 1 | _timescaledb_internal | _hyper_1_4_chunk | | f | 0 | f +(4 rows) + +SELECT * FROM "two_Partitions" ORDER BY "timeCustom", device_id, series_0, series_1; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +---------------------+-----------+----------+----------+----------+------------- + 1257894000000000000 | dev1 | 1.5 | 1 | 2 | t + 1257894000000000000 | dev1 | 1.5 | 2 | | + 1257894000000000000 | dev2 | 1.5 | 1 | | + 1257894000000000000 | dev2 | 1.5 | 2 | | + 1257894000000001000 | dev1 | 2.5 | 3 | | + 1257894001000000000 | dev1 | 3.5 | 4 | | + 1257894002000000000 | dev1 | 2.5 | 3 | | + 1257894002000000000 | dev1 | 5.5 | 6 | | t + 1257894002000000000 | dev1 | 5.5 | 7 | | f + 1257897600000000000 | dev1 | 4.5 | 5 | | f + 1257987600000000000 | dev1 | 1.5 | 1 | | + 1257987600000000000 | dev1 | 1.5 | 2 | | +(12 rows) + +SELECT * FROM ONLY "two_Partitions"; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +------------+-----------+----------+----------+----------+------------- +(0 rows) + +CREATE TABLE error_test(time timestamp, temp float8, device text NOT NULL); +SELECT create_hypertable('error_test', 'time', 
'device', 2); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------- + (2,public,error_test,t) +(1 row) + +\set QUIET off +INSERT INTO error_test VALUES ('Mon Mar 20 09:18:20.1 2017', 21.3, 'dev1'); +INSERT 0 1 +\set ON_ERROR_STOP 0 +-- generate insert error +INSERT INTO error_test VALUES ('Mon Mar 20 09:18:22.3 2017', 21.1, NULL); +ERROR: null value in column "device" of relation "_hyper_2_6_chunk" violates not-null constraint +\set ON_ERROR_STOP 1 +INSERT INTO error_test VALUES ('Mon Mar 20 09:18:25.7 2017', 22.4, 'dev2'); +INSERT 0 1 +\set QUIET on +SELECT * FROM error_test; + time | temp | device +----------------------------+------+-------- + Mon Mar 20 09:18:20.1 2017 | 21.3 | dev1 + Mon Mar 20 09:18:25.7 2017 | 22.4 | dev2 +(2 rows) + +--test character(9) partition keys since there were issues with padding causing partitioning errors +CREATE TABLE tick_character ( + symbol character(9) NOT NULL, + mid REAL NOT NULL, + spread REAL NOT NULL, + time TIMESTAMPTZ NOT NULL +); +SELECT create_hypertable ('tick_character', 'time', 'symbol', 2); + create_hypertable +----------------------------- + (3,public,tick_character,t) +(1 row) + +INSERT INTO tick_character ( symbol, mid, spread, time ) VALUES ( 'GBPJPY', 142.639000, 5.80, 'Mon Mar 20 09:18:22.3 2017') RETURNING time, symbol, mid; + time | symbol | mid +--------------------------------+-----------+--------- + Mon Mar 20 09:18:22.3 2017 PDT | GBPJPY | 142.639 +(1 row) + +SELECT * FROM tick_character; + symbol | mid | spread | time +-----------+---------+--------+-------------------------------- + GBPJPY | 142.639 | 5.8 | Mon Mar 20 09:18:22.3 2017 PDT +(1 row) + +CREATE TABLE date_col_test(time date, temp float8, device text NOT NULL); +SELECT create_hypertable('date_col_test', 'time', 'device', 1000, chunk_time_interval => INTERVAL '1 Day'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (4,public,date_col_test,t) +(1 row) + +INSERT INTO date_col_test +VALUES ('2001-02-01', 98, 'dev1'), +('2001-03-02', 98, 'dev1'); +SELECT * FROM date_col_test WHERE time > '2001-01-01'; + time | temp | device +------------+------+-------- + 02-01-2001 | 98 | dev1 + 03-02-2001 | 98 | dev1 +(2 rows) + +-- Out-of-order insertion regression test. 
+-- this used to trip an assert in subspace_store.c checking that +-- max_open_chunks_per_insert was obeyed +set timescaledb.max_open_chunks_per_insert=1; +CREATE TABLE chunk_assert_fail(i bigint, j bigint); +SELECT create_hypertable('chunk_assert_fail', 'i', 'j', 1000, chunk_time_interval=>1); +NOTICE: adding not-null constraint to column "i" + create_hypertable +-------------------------------- + (5,public,chunk_assert_fail,t) +(1 row) + +insert into chunk_assert_fail values (1, 1), (1, 2), (2,1); +select * from chunk_assert_fail; + i | j +---+--- + 1 | 1 + 1 | 2 + 2 | 1 +(3 rows) + +CREATE TABLE one_space_test(time timestamp, temp float8, device text NOT NULL); +SELECT create_hypertable('one_space_test', 'time', 'device', 1); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------------- + (6,public,one_space_test,t) +(1 row) + +INSERT INTO one_space_test VALUES +('2001-01-01 01:01:01', 1.0, 'device'), +('2002-01-01 01:02:01', 1.0, 'device'); +SELECT * FROM one_space_test; + time | temp | device +--------------------------+------+-------- + Mon Jan 01 01:01:01 2001 | 1 | device + Tue Jan 01 01:02:01 2002 | 1 | device +(2 rows) + +--CTE & EXPLAIN ANALYZE TESTS +WITH insert_cte as ( + INSERT INTO one_space_test VALUES + ('2001-01-01 01:02:01', 1.0, 'device') + RETURNING *) +SELECT * FROM insert_cte; + time | temp | device +--------------------------+------+-------- + Mon Jan 01 01:02:01 2001 | 1 | device +(1 row) + +EXPLAIN (analyze, costs off, timing off) --can't turn summary off in 9.6 so instead grep it away at end. +WITH insert_cte as ( + INSERT INTO one_space_test VALUES + ('2001-01-01 01:03:01', 1.0, 'device') + ) +SELECT 1 \g | grep -v "Planning" | grep -v "Execution" + QUERY PLAN +------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + CTE insert_cte + -> Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Insert on one_space_test (actual rows=0 loops=1) + -> Custom Scan (ChunkDispatch) (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) +(8 rows) + +-- INSERTs can exclude chunks based on constraints +EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail; + QUERY PLAN +------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on chunk_assert_fail + -> Custom Scan (ChunkDispatch) + -> Append + -> Seq Scan on _hyper_5_11_chunk + -> Seq Scan on _hyper_5_12_chunk + -> Seq Scan on _hyper_5_13_chunk +(7 rows) + +EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i < 1; + QUERY PLAN +-------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on chunk_assert_fail + -> Custom Scan (ChunkDispatch) + -> Result + One-Time Filter: false +(5 rows) + +EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i = 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on chunk_assert_fail + -> Custom Scan (ChunkDispatch) + -> Append + -> Index Scan using _hyper_5_11_chunk_chunk_assert_fail_i_idx on _hyper_5_11_chunk + Index Cond: (i = 1) + -> Index Scan using _hyper_5_12_chunk_chunk_assert_fail_i_idx on _hyper_5_12_chunk + Index Cond: (i = 1) +(8 rows) + +EXPLAIN (costs off) INSERT INTO chunk_assert_fail 
SELECT i, j FROM chunk_assert_fail WHERE i > 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on chunk_assert_fail + -> Custom Scan (ChunkDispatch) + -> Index Scan using _hyper_5_13_chunk_chunk_assert_fail_i_idx on _hyper_5_13_chunk + Index Cond: (i > 1) +(5 rows) + +INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i > 1; +EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time < 'infinity' LIMIT 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on one_space_test + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + -> Index Scan using _hyper_6_14_chunk_one_space_test_time_idx on _hyper_6_14_chunk + Index Cond: ("time" < 'infinity'::timestamp without time zone) + -> Index Scan using _hyper_6_15_chunk_one_space_test_time_idx on _hyper_6_15_chunk + Index Cond: ("time" < 'infinity'::timestamp without time zone) +(9 rows) + +EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time >= 'infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on one_space_test + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time <= '-infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on one_space_test + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time > '-infinity' LIMIT 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on one_space_test + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + -> Index Scan using _hyper_6_14_chunk_one_space_test_time_idx on _hyper_6_14_chunk + Index Cond: ("time" > '-infinity'::timestamp without time zone) + -> Index Scan using _hyper_6_15_chunk_one_space_test_time_idx on _hyper_6_15_chunk + Index Cond: ("time" > '-infinity'::timestamp without time zone) +(9 rows) + +INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time < 'infinity' LIMIT 1; +INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time >= 'infinity' LIMIT 1; +INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time <= '-infinity' LIMIT 1; +INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time > '-infinity' LIMIT 1; +CREATE TABLE timestamp_inf(time TIMESTAMP); +SELECT create_hypertable('timestamp_inf', 'time'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (7,public,timestamp_inf,t) +(1 row) + +INSERT INTO timestamp_inf VALUES ('2018/01/02'), ('2019/01/02'); +EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf + WHERE time < 'infinity' LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on timestamp_inf + -> Custom Scan 
(ChunkDispatch) + -> Limit + -> Append + -> Index Only Scan using _hyper_7_16_chunk_timestamp_inf_time_idx on _hyper_7_16_chunk + Index Cond: ("time" < 'infinity'::timestamp without time zone) + -> Index Only Scan using _hyper_7_17_chunk_timestamp_inf_time_idx on _hyper_7_17_chunk + Index Cond: ("time" < 'infinity'::timestamp without time zone) +(9 rows) + +EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf + WHERE time >= 'infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on timestamp_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf + WHERE time <= '-infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on timestamp_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf + WHERE time > '-infinity' LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on timestamp_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + -> Index Only Scan using _hyper_7_16_chunk_timestamp_inf_time_idx on _hyper_7_16_chunk + Index Cond: ("time" > '-infinity'::timestamp without time zone) + -> Index Only Scan using _hyper_7_17_chunk_timestamp_inf_time_idx on _hyper_7_17_chunk + Index Cond: ("time" > '-infinity'::timestamp without time zone) +(9 rows) + +CREATE TABLE date_inf(time DATE); +SELECT create_hypertable('date_inf', 'time'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------- + (8,public,date_inf,t) +(1 row) + +INSERT INTO date_inf VALUES ('2018/01/02'), ('2019/01/02'); +EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf + WHERE time < 'infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on date_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + -> Index Only Scan using _hyper_8_18_chunk_date_inf_time_idx on _hyper_8_18_chunk + Index Cond: ("time" < 'infinity'::date) + -> Index Only Scan using _hyper_8_19_chunk_date_inf_time_idx on _hyper_8_19_chunk + Index Cond: ("time" < 'infinity'::date) +(9 rows) + +EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf + WHERE time >= 'infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on date_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf + WHERE time <= '-infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on date_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf + WHERE time > '-infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on date_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + 
-> Index Only Scan using _hyper_8_18_chunk_date_inf_time_idx on _hyper_8_18_chunk + Index Cond: ("time" > '-infinity'::date) + -> Index Only Scan using _hyper_8_19_chunk_date_inf_time_idx on _hyper_8_19_chunk + Index Cond: ("time" > '-infinity'::date) +(9 rows) + +-- test INSERT with cached plans / plpgsql functions +-- https://github.com/timescale/timescaledb/issues/1809 +CREATE TABLE status_table(a int, b int, last_ts timestamptz, UNIQUE(a,b)); +CREATE TABLE metrics(time timestamptz NOT NULL, value float); +CREATE TABLE metrics2(time timestamptz NOT NULL, value float); +SELECT (create_hypertable(t,'time')).table_name FROM (VALUES ('metrics'),('metrics2')) v(t); + table_name +------------ + metrics + metrics2 +(2 rows) + +INSERT INTO metrics VALUES ('2000-01-01',random()), ('2000-02-01',random()), ('2000-03-01',random()); +CREATE OR REPLACE FUNCTION insert_test() RETURNS VOID LANGUAGE plpgsql AS +$$ + DECLARE + r RECORD; + BEGIN + FOR r IN + SELECT * FROM metrics + LOOP + WITH foo AS ( + INSERT INTO metrics2 SELECT * FROM metrics RETURNING * + ) + INSERT INTO status_table (a,b, last_ts) + VALUES (1,1, now()) + ON CONFLICT (a,b) DO UPDATE SET last_ts=(SELECT max(time) FROM metrics); + END LOOP; + END; +$$; +SELECT insert_test(), insert_test(), insert_test(); + insert_test | insert_test | insert_test +-------------+-------------+------------- + | | +(1 row) + +-- test Postgres crashes on INSERT ... SELECT ... WHERE NOT EXISTS with empty table +-- https://github.com/timescale/timescaledb/issues/1883 +CREATE TABLE readings ( + toe TIMESTAMPTZ NOT NULL, + sensor_id INT NOT NULL, + value INT NOT NULL +); +SELECT create_hypertable( + 'readings', + 'toe', + chunk_time_interval => interval '1 day', + if_not_exists => TRUE, + migrate_data => TRUE +); + create_hypertable +------------------------ + (11,public,readings,t) +(1 row) + +EXPLAIN (costs off) +INSERT INTO readings +SELECT '2020-05-09 10:34:35.296288+00', 1, 0 +WHERE NOT EXISTS ( + SELECT 1 + FROM readings + WHERE sensor_id = 1 + AND toe = '2020-05-09 10:34:35.296288+00' +); + QUERY PLAN +----------------------------------------------------- + Custom Scan (HypertableModify) + InitPlan 1 (returns $0) + -> Result + One-Time Filter: false + -> Insert on readings + -> Result + One-Time Filter: (NOT $0) + -> Custom Scan (ChunkDispatch) + -> Result + One-Time Filter: (NOT $0) +(10 rows) + +INSERT INTO readings +SELECT '2020-05-09 10:34:35.296288+00', 1, 0 +WHERE NOT EXISTS ( + SELECT 1 + FROM readings + WHERE sensor_id = 1 + AND toe = '2020-05-09 10:34:35.296288+00' +); +DROP TABLE readings; +CREATE TABLE sample_table ( + sequence INTEGER NOT NULL, + time TIMESTAMP WITHOUT TIME ZONE NOT NULL, + value NUMERIC NOT NULL, + UNIQUE (sequence, time) +); +SELECT * FROM create_hypertable('sample_table', 'time', + chunk_time_interval => INTERVAL '1 day'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 12 | public | sample_table | t +(1 row) + +INSERT INTO sample_table (sequence,time,value) VALUES + (7, generate_series(TIMESTAMP '2019-08-01', TIMESTAMP '2019-08-10', INTERVAL '10 minutes'), ROUND(RANDOM()*10)::int); +\set ON_ERROR_STOP 0 +INSERT INTO sample_table (sequence,time,value) VALUES + (7, generate_series(TIMESTAMP '2019-07-21', TIMESTAMP '2019-08-01', INTERVAL '10 minutes'), ROUND(RANDOM()*10)::int); +ERROR: duplicate key value violates unique constraint 
"27_1_sample_table_sequence_time_key" +\set ON_ERROR_STOP 1 +INSERT INTO sample_table (sequence,time,value) VALUES + (7,generate_series(TIMESTAMP '2019-01-01', TIMESTAMP '2019-07-01', '10 minutes'), ROUND(RANDOM()*10)::int); +DROP TABLE sample_table; +-- test on conflict clause on columns with default value +-- issue #3037 +CREATE TABLE i3037(time timestamptz PRIMARY KEY); +SELECT create_hypertable('i3037','time'); + create_hypertable +--------------------- + (13,public,i3037,t) +(1 row) + +ALTER TABLE i3037 ADD COLUMN value float DEFAULT 0; +INSERT INTO i3037 VALUES ('2000-01-01'); +INSERT INTO i3037 VALUES ('2000-01-01') ON CONFLICT(time) DO UPDATE SET value = EXCLUDED.value; diff --git a/test/expected/insert-16.out b/test/expected/insert-16.out new file mode 100644 index 00000000000..3d49a902e36 --- /dev/null +++ b/test/expected/insert-16.out @@ -0,0 +1,675 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\ir include/insert_two_partitions.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +CREATE TABLE PUBLIC."two_Partitions" ( + "timeCustom" BIGINT NOT NULL, + device_id TEXT NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL, + series_bool BOOLEAN NULL +); +CREATE INDEX ON PUBLIC."two_Partitions" (device_id, "timeCustom" DESC NULLS LAST) WHERE device_id IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_0) WHERE series_0 IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_1) WHERE series_1 IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_2) WHERE series_2 IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_bool) WHERE series_bool IS NOT NULL; +CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, device_id); +SELECT * FROM create_hypertable('"public"."two_Partitions"'::regclass, 'timeCustom'::name, 'device_id'::name, associated_schema_name=>'_timescaledb_internal'::text, number_partitions => 2, chunk_time_interval=>_timescaledb_functions.interval_to_usec('1 month')); + hypertable_id | schema_name | table_name | created +---------------+-------------+----------------+--------- + 1 | public | two_Partitions | t +(1 row) + +\set QUIET off +BEGIN; +BEGIN +\COPY public."two_Partitions" FROM 'data/ds1_dev1_1.tsv' NULL AS ''; +COPY 7 +COMMIT; +COMMIT +INSERT INTO public."two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES +(1257987600000000000, 'dev1', 1.5, 1), +(1257987600000000000, 'dev1', 1.5, 2), +(1257894000000000000, 'dev2', 1.5, 1), +(1257894002000000000, 'dev1', 2.5, 3); +INSERT 0 4 +INSERT INTO "two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES +(1257894000000000000, 'dev2', 1.5, 2); +INSERT 0 1 +\set QUIET on +SELECT * FROM test.show_columnsp('_timescaledb_internal.%_hyper%'); + Relation | Kind | Column | Column type | NotNull +------------------------------------------------------------------------------------+------+-------------+------------------+--------- + _timescaledb_internal._hyper_1_1_chunk | r | timeCustom | bigint | t + _timescaledb_internal._hyper_1_1_chunk | r | device_id | text | t + _timescaledb_internal._hyper_1_1_chunk | r 
| series_0 | double precision | f + _timescaledb_internal._hyper_1_1_chunk | r | series_1 | double precision | f + _timescaledb_internal._hyper_1_1_chunk | r | series_2 | double precision | f + _timescaledb_internal._hyper_1_1_chunk | r | series_bool | boolean | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f + _timescaledb_internal._hyper_1_2_chunk | r | timeCustom | bigint | t + _timescaledb_internal._hyper_1_2_chunk | r | device_id | text | t + _timescaledb_internal._hyper_1_2_chunk | r | series_0 | double precision | f + _timescaledb_internal._hyper_1_2_chunk | r | series_1 | double precision | f + _timescaledb_internal._hyper_1_2_chunk | r | series_2 | double precision | f + _timescaledb_internal._hyper_1_2_chunk | r | series_bool | boolean | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f + _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f + 
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f + _timescaledb_internal._hyper_1_3_chunk | r | timeCustom | bigint | t + _timescaledb_internal._hyper_1_3_chunk | r | device_id | text | t + _timescaledb_internal._hyper_1_3_chunk | r | series_0 | double precision | f + _timescaledb_internal._hyper_1_3_chunk | r | series_1 | double precision | f + _timescaledb_internal._hyper_1_3_chunk | r | series_2 | double precision | f + _timescaledb_internal._hyper_1_3_chunk | r | series_bool | boolean | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f + _timescaledb_internal._hyper_1_4_chunk | r | timeCustom | bigint | t + _timescaledb_internal._hyper_1_4_chunk | r | device_id | text | t + _timescaledb_internal._hyper_1_4_chunk | r | series_0 | double precision | f + _timescaledb_internal._hyper_1_4_chunk | r | series_1 | double precision | f + _timescaledb_internal._hyper_1_4_chunk | r | series_2 | double precision | f + _timescaledb_internal._hyper_1_4_chunk | r | series_bool | boolean | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f + 
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f + _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f +(76 rows) + +SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%'); + Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace +----------------------------------------+------------------------------------------------------------------------------------+--------------------------+------+--------+---------+-----------+------------ + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f | + _timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f | + _timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" | 
{timeCustom,series_1} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f | + _timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f | + _timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f | +(28 rows) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+------------------+---------------------+---------+--------+----------- + 1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | | f | 0 | f + 2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | | f | 0 | f + 3 | 1 | _timescaledb_internal | _hyper_1_3_chunk | | f | 0 | f + 4 | 1 | _timescaledb_internal | _hyper_1_4_chunk | | f | 0 | f +(4 rows) + +SELECT * FROM "two_Partitions" ORDER BY "timeCustom", device_id, series_0, series_1; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +---------------------+-----------+----------+----------+----------+------------- + 1257894000000000000 | dev1 | 1.5 | 1 | 2 | t + 1257894000000000000 | dev1 | 1.5 | 2 | | + 1257894000000000000 | dev2 | 1.5 | 1 | | + 1257894000000000000 | dev2 | 1.5 | 2 | | + 1257894000000001000 | dev1 | 2.5 | 3 | | + 1257894001000000000 | dev1 | 3.5 | 4 | | + 1257894002000000000 | dev1 | 2.5 | 3 | | + 1257894002000000000 | dev1 | 5.5 | 6 | | t + 1257894002000000000 | dev1 | 5.5 | 7 | | f + 1257897600000000000 | dev1 | 4.5 | 5 | | f + 1257987600000000000 | dev1 | 1.5 | 1 | | + 1257987600000000000 | dev1 | 1.5 | 2 | | +(12 rows) + +SELECT * FROM ONLY "two_Partitions"; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +------------+-----------+----------+----------+----------+------------- +(0 rows) + +CREATE TABLE error_test(time timestamp, temp float8, device text NOT NULL); +SELECT create_hypertable('error_test', 'time', 
'device', 2); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------- + (2,public,error_test,t) +(1 row) + +\set QUIET off +INSERT INTO error_test VALUES ('Mon Mar 20 09:18:20.1 2017', 21.3, 'dev1'); +INSERT 0 1 +\set ON_ERROR_STOP 0 +-- generate insert error +INSERT INTO error_test VALUES ('Mon Mar 20 09:18:22.3 2017', 21.1, NULL); +ERROR: null value in column "device" of relation "_hyper_2_6_chunk" violates not-null constraint +\set ON_ERROR_STOP 1 +INSERT INTO error_test VALUES ('Mon Mar 20 09:18:25.7 2017', 22.4, 'dev2'); +INSERT 0 1 +\set QUIET on +SELECT * FROM error_test; + time | temp | device +----------------------------+------+-------- + Mon Mar 20 09:18:20.1 2017 | 21.3 | dev1 + Mon Mar 20 09:18:25.7 2017 | 22.4 | dev2 +(2 rows) + +--test character(9) partition keys since there were issues with padding causing partitioning errors +CREATE TABLE tick_character ( + symbol character(9) NOT NULL, + mid REAL NOT NULL, + spread REAL NOT NULL, + time TIMESTAMPTZ NOT NULL +); +SELECT create_hypertable ('tick_character', 'time', 'symbol', 2); + create_hypertable +----------------------------- + (3,public,tick_character,t) +(1 row) + +INSERT INTO tick_character ( symbol, mid, spread, time ) VALUES ( 'GBPJPY', 142.639000, 5.80, 'Mon Mar 20 09:18:22.3 2017') RETURNING time, symbol, mid; + time | symbol | mid +--------------------------------+-----------+--------- + Mon Mar 20 09:18:22.3 2017 PDT | GBPJPY | 142.639 +(1 row) + +SELECT * FROM tick_character; + symbol | mid | spread | time +-----------+---------+--------+-------------------------------- + GBPJPY | 142.639 | 5.8 | Mon Mar 20 09:18:22.3 2017 PDT +(1 row) + +CREATE TABLE date_col_test(time date, temp float8, device text NOT NULL); +SELECT create_hypertable('date_col_test', 'time', 'device', 1000, chunk_time_interval => INTERVAL '1 Day'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (4,public,date_col_test,t) +(1 row) + +INSERT INTO date_col_test +VALUES ('2001-02-01', 98, 'dev1'), +('2001-03-02', 98, 'dev1'); +SELECT * FROM date_col_test WHERE time > '2001-01-01'; + time | temp | device +------------+------+-------- + 02-01-2001 | 98 | dev1 + 03-02-2001 | 98 | dev1 +(2 rows) + +-- Out-of-order insertion regression test. 
+-- this used to trip an assert in subspace_store.c checking that +-- max_open_chunks_per_insert was obeyed +set timescaledb.max_open_chunks_per_insert=1; +CREATE TABLE chunk_assert_fail(i bigint, j bigint); +SELECT create_hypertable('chunk_assert_fail', 'i', 'j', 1000, chunk_time_interval=>1); +NOTICE: adding not-null constraint to column "i" + create_hypertable +-------------------------------- + (5,public,chunk_assert_fail,t) +(1 row) + +insert into chunk_assert_fail values (1, 1), (1, 2), (2,1); +select * from chunk_assert_fail; + i | j +---+--- + 1 | 1 + 1 | 2 + 2 | 1 +(3 rows) + +CREATE TABLE one_space_test(time timestamp, temp float8, device text NOT NULL); +SELECT create_hypertable('one_space_test', 'time', 'device', 1); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------------- + (6,public,one_space_test,t) +(1 row) + +INSERT INTO one_space_test VALUES +('2001-01-01 01:01:01', 1.0, 'device'), +('2002-01-01 01:02:01', 1.0, 'device'); +SELECT * FROM one_space_test; + time | temp | device +--------------------------+------+-------- + Mon Jan 01 01:01:01 2001 | 1 | device + Tue Jan 01 01:02:01 2002 | 1 | device +(2 rows) + +--CTE & EXPLAIN ANALYZE TESTS +WITH insert_cte as ( + INSERT INTO one_space_test VALUES + ('2001-01-01 01:02:01', 1.0, 'device') + RETURNING *) +SELECT * FROM insert_cte; + time | temp | device +--------------------------+------+-------- + Mon Jan 01 01:02:01 2001 | 1 | device +(1 row) + +EXPLAIN (analyze, costs off, timing off) --can't turn summary off in 9.6 so instead grep it away at end. +WITH insert_cte as ( + INSERT INTO one_space_test VALUES + ('2001-01-01 01:03:01', 1.0, 'device') + ) +SELECT 1 \g | grep -v "Planning" | grep -v "Execution" + QUERY PLAN +------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + CTE insert_cte + -> Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Insert on one_space_test (actual rows=0 loops=1) + -> Custom Scan (ChunkDispatch) (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) +(8 rows) + +-- INSERTs can exclude chunks based on constraints +EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail; + QUERY PLAN +------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on chunk_assert_fail + -> Custom Scan (ChunkDispatch) + -> Append + -> Seq Scan on _hyper_5_11_chunk + -> Seq Scan on _hyper_5_12_chunk + -> Seq Scan on _hyper_5_13_chunk +(7 rows) + +EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i < 1; + QUERY PLAN +-------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on chunk_assert_fail + -> Custom Scan (ChunkDispatch) + -> Result + One-Time Filter: false +(5 rows) + +EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i = 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on chunk_assert_fail + -> Custom Scan (ChunkDispatch) + -> Append + -> Index Scan using _hyper_5_11_chunk_chunk_assert_fail_i_idx on _hyper_5_11_chunk + Index Cond: (i = 1) + -> Index Scan using _hyper_5_12_chunk_chunk_assert_fail_i_idx on _hyper_5_12_chunk + Index Cond: (i = 1) +(8 rows) + +EXPLAIN (costs off) INSERT INTO chunk_assert_fail 
SELECT i, j FROM chunk_assert_fail WHERE i > 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on chunk_assert_fail + -> Custom Scan (ChunkDispatch) + -> Index Scan using _hyper_5_13_chunk_chunk_assert_fail_i_idx on _hyper_5_13_chunk + Index Cond: (i > 1) +(5 rows) + +INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i > 1; +EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time < 'infinity' LIMIT 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on one_space_test + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + -> Index Scan using _hyper_6_14_chunk_one_space_test_time_idx on _hyper_6_14_chunk + Index Cond: ("time" < 'infinity'::timestamp without time zone) + -> Index Scan using _hyper_6_15_chunk_one_space_test_time_idx on _hyper_6_15_chunk + Index Cond: ("time" < 'infinity'::timestamp without time zone) +(9 rows) + +EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time >= 'infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on one_space_test + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time <= '-infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on one_space_test + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time > '-infinity' LIMIT 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on one_space_test + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + -> Index Scan using _hyper_6_14_chunk_one_space_test_time_idx on _hyper_6_14_chunk + Index Cond: ("time" > '-infinity'::timestamp without time zone) + -> Index Scan using _hyper_6_15_chunk_one_space_test_time_idx on _hyper_6_15_chunk + Index Cond: ("time" > '-infinity'::timestamp without time zone) +(9 rows) + +INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time < 'infinity' LIMIT 1; +INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time >= 'infinity' LIMIT 1; +INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time <= '-infinity' LIMIT 1; +INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time > '-infinity' LIMIT 1; +CREATE TABLE timestamp_inf(time TIMESTAMP); +SELECT create_hypertable('timestamp_inf', 'time'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (7,public,timestamp_inf,t) +(1 row) + +INSERT INTO timestamp_inf VALUES ('2018/01/02'), ('2019/01/02'); +EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf + WHERE time < 'infinity' LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on timestamp_inf + -> Custom Scan 
(ChunkDispatch) + -> Limit + -> Append + -> Index Only Scan using _hyper_7_16_chunk_timestamp_inf_time_idx on _hyper_7_16_chunk + Index Cond: ("time" < 'infinity'::timestamp without time zone) + -> Index Only Scan using _hyper_7_17_chunk_timestamp_inf_time_idx on _hyper_7_17_chunk + Index Cond: ("time" < 'infinity'::timestamp without time zone) +(9 rows) + +EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf + WHERE time >= 'infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on timestamp_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf + WHERE time <= '-infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on timestamp_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf + WHERE time > '-infinity' LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on timestamp_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + -> Index Only Scan using _hyper_7_16_chunk_timestamp_inf_time_idx on _hyper_7_16_chunk + Index Cond: ("time" > '-infinity'::timestamp without time zone) + -> Index Only Scan using _hyper_7_17_chunk_timestamp_inf_time_idx on _hyper_7_17_chunk + Index Cond: ("time" > '-infinity'::timestamp without time zone) +(9 rows) + +CREATE TABLE date_inf(time DATE); +SELECT create_hypertable('date_inf', 'time'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------- + (8,public,date_inf,t) +(1 row) + +INSERT INTO date_inf VALUES ('2018/01/02'), ('2019/01/02'); +EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf + WHERE time < 'infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on date_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + -> Index Only Scan using _hyper_8_18_chunk_date_inf_time_idx on _hyper_8_18_chunk + Index Cond: ("time" < 'infinity'::date) + -> Index Only Scan using _hyper_8_19_chunk_date_inf_time_idx on _hyper_8_19_chunk + Index Cond: ("time" < 'infinity'::date) +(9 rows) + +EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf + WHERE time >= 'infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on date_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf + WHERE time <= '-infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on date_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Result + One-Time Filter: false +(6 rows) + +EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf + WHERE time > '-infinity' LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Insert on date_inf + -> Custom Scan (ChunkDispatch) + -> Limit + -> Append + 
-> Index Only Scan using _hyper_8_18_chunk_date_inf_time_idx on _hyper_8_18_chunk + Index Cond: ("time" > '-infinity'::date) + -> Index Only Scan using _hyper_8_19_chunk_date_inf_time_idx on _hyper_8_19_chunk + Index Cond: ("time" > '-infinity'::date) +(9 rows) + +-- test INSERT with cached plans / plpgsql functions +-- https://github.com/timescale/timescaledb/issues/1809 +CREATE TABLE status_table(a int, b int, last_ts timestamptz, UNIQUE(a,b)); +CREATE TABLE metrics(time timestamptz NOT NULL, value float); +CREATE TABLE metrics2(time timestamptz NOT NULL, value float); +SELECT (create_hypertable(t,'time')).table_name FROM (VALUES ('metrics'),('metrics2')) v(t); + table_name +------------ + metrics + metrics2 +(2 rows) + +INSERT INTO metrics VALUES ('2000-01-01',random()), ('2000-02-01',random()), ('2000-03-01',random()); +CREATE OR REPLACE FUNCTION insert_test() RETURNS VOID LANGUAGE plpgsql AS +$$ + DECLARE + r RECORD; + BEGIN + FOR r IN + SELECT * FROM metrics + LOOP + WITH foo AS ( + INSERT INTO metrics2 SELECT * FROM metrics RETURNING * + ) + INSERT INTO status_table (a,b, last_ts) + VALUES (1,1, now()) + ON CONFLICT (a,b) DO UPDATE SET last_ts=(SELECT max(time) FROM metrics); + END LOOP; + END; +$$; +SELECT insert_test(), insert_test(), insert_test(); + insert_test | insert_test | insert_test +-------------+-------------+------------- + | | +(1 row) + +-- test Postgres crashes on INSERT ... SELECT ... WHERE NOT EXISTS with empty table +-- https://github.com/timescale/timescaledb/issues/1883 +CREATE TABLE readings ( + toe TIMESTAMPTZ NOT NULL, + sensor_id INT NOT NULL, + value INT NOT NULL +); +SELECT create_hypertable( + 'readings', + 'toe', + chunk_time_interval => interval '1 day', + if_not_exists => TRUE, + migrate_data => TRUE +); + create_hypertable +------------------------ + (11,public,readings,t) +(1 row) + +EXPLAIN (costs off) +INSERT INTO readings +SELECT '2020-05-09 10:34:35.296288+00', 1, 0 +WHERE NOT EXISTS ( + SELECT 1 + FROM readings + WHERE sensor_id = 1 + AND toe = '2020-05-09 10:34:35.296288+00' +); + QUERY PLAN +----------------------------------------------- + Custom Scan (HypertableModify) + InitPlan 1 (returns $0) + -> Result + One-Time Filter: false + -> Insert on readings + -> Custom Scan (ChunkDispatch) + -> Result + One-Time Filter: (NOT $0) +(8 rows) + +INSERT INTO readings +SELECT '2020-05-09 10:34:35.296288+00', 1, 0 +WHERE NOT EXISTS ( + SELECT 1 + FROM readings + WHERE sensor_id = 1 + AND toe = '2020-05-09 10:34:35.296288+00' +); +DROP TABLE readings; +CREATE TABLE sample_table ( + sequence INTEGER NOT NULL, + time TIMESTAMP WITHOUT TIME ZONE NOT NULL, + value NUMERIC NOT NULL, + UNIQUE (sequence, time) +); +SELECT * FROM create_hypertable('sample_table', 'time', + chunk_time_interval => INTERVAL '1 day'); +WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 12 | public | sample_table | t +(1 row) + +INSERT INTO sample_table (sequence,time,value) VALUES + (7, generate_series(TIMESTAMP '2019-08-01', TIMESTAMP '2019-08-10', INTERVAL '10 minutes'), ROUND(RANDOM()*10)::int); +\set ON_ERROR_STOP 0 +INSERT INTO sample_table (sequence,time,value) VALUES + (7, generate_series(TIMESTAMP '2019-07-21', TIMESTAMP '2019-08-01', INTERVAL '10 minutes'), ROUND(RANDOM()*10)::int); +ERROR: duplicate key value violates unique constraint "27_1_sample_table_sequence_time_key" +\set ON_ERROR_STOP 1 +INSERT INTO 
sample_table (sequence,time,value) VALUES + (7,generate_series(TIMESTAMP '2019-01-01', TIMESTAMP '2019-07-01', '10 minutes'), ROUND(RANDOM()*10)::int); +DROP TABLE sample_table; +-- test on conflict clause on columns with default value +-- issue #3037 +CREATE TABLE i3037(time timestamptz PRIMARY KEY); +SELECT create_hypertable('i3037','time'); + create_hypertable +--------------------- + (13,public,i3037,t) +(1 row) + +ALTER TABLE i3037 ADD COLUMN value float DEFAULT 0; +INSERT INTO i3037 VALUES ('2000-01-01'); +INSERT INTO i3037 VALUES ('2000-01-01') ON CONFLICT(time) DO UPDATE SET value = EXCLUDED.value; diff --git a/test/expected/plan_expand_hypertable.out b/test/expected/plan_expand_hypertable-13.out similarity index 100% rename from test/expected/plan_expand_hypertable.out rename to test/expected/plan_expand_hypertable-13.out diff --git a/test/expected/plan_expand_hypertable-14.out b/test/expected/plan_expand_hypertable-14.out new file mode 100644 index 00000000000..cd939c7bd71 --- /dev/null +++ b/test/expected/plan_expand_hypertable-14.out @@ -0,0 +1,3026 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\set PREFIX 'EXPLAIN (costs off) ' +\ir include/plan_expand_hypertable_load.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +--single time dimension +CREATE TABLE hyper ("time_broken" bigint NOT NULL, "value" integer); +ALTER TABLE hyper +DROP COLUMN time_broken, +ADD COLUMN time BIGINT; +SELECT create_hypertable('hyper', 'time', chunk_time_interval => 10); +psql:include/plan_expand_hypertable_load.sql:12: NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------- + (1,public,hyper,t) +(1 row) + +INSERT INTO hyper SELECT g, g FROM generate_series(0,1000) g; +--insert a point with INT_MAX_64 +INSERT INTO hyper (time, value) SELECT 9223372036854775807::bigint, 0; +--time and space +CREATE TABLE hyper_w_space ("time_broken" bigint NOT NULL, "device_id" text, "value" integer); +ALTER TABLE hyper_w_space +DROP COLUMN time_broken, +ADD COLUMN time BIGINT; +SELECT create_hypertable('hyper_w_space', 'time', 'device_id', 4, chunk_time_interval => 10); +psql:include/plan_expand_hypertable_load.sql:26: NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (2,public,hyper_w_space,t) +(1 row) + +INSERT INTO hyper_w_space (time, device_id, value) SELECT g, 'dev' || g, g FROM generate_series(0,30) g; +CREATE VIEW hyper_w_space_view AS (SELECT * FROM hyper_w_space); +--with timestamp and space +CREATE TABLE tag (id serial PRIMARY KEY, name text); +CREATE TABLE hyper_ts ("time_broken" timestamptz NOT NULL, "device_id" text, tag_id INT REFERENCES tag(id), "value" integer); +ALTER TABLE hyper_ts +DROP COLUMN time_broken, +ADD COLUMN time TIMESTAMPTZ; +SELECT create_hypertable('hyper_ts', 'time', 'device_id', 2, chunk_time_interval => '10 seconds'::interval); +psql:include/plan_expand_hypertable_load.sql:41: NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------- + (3,public,hyper_ts,t) +(1 row) + +INSERT INTO tag(name) SELECT 'tag'||g FROM generate_series(0,10) g; +INSERT INTO hyper_ts (time, device_id, tag_id, value) SELECT to_timestamp(g), 'dev' || g, (random() /10)+1, g FROM generate_series(0,30) g; 
+--one in the future +INSERT INTO hyper_ts (time, device_id, tag_id, value) VALUES ('2100-01-01 02:03:04 PST', 'dev101', 1, 0); +--time partitioning function +CREATE OR REPLACE FUNCTION unix_to_timestamp(unixtime float8) + RETURNS TIMESTAMPTZ LANGUAGE SQL IMMUTABLE PARALLEL SAFE STRICT AS +$BODY$ + SELECT to_timestamp(unixtime); +$BODY$; +CREATE TABLE hyper_timefunc ("time" float8 NOT NULL, "device_id" text, "value" integer); +SELECT create_hypertable('hyper_timefunc', 'time', 'device_id', 4, chunk_time_interval => 10, time_partitioning_func => 'unix_to_timestamp'); +psql:include/plan_expand_hypertable_load.sql:57: WARNING: unexpected interval: smaller than one second + create_hypertable +----------------------------- + (4,public,hyper_timefunc,t) +(1 row) + +INSERT INTO hyper_timefunc (time, device_id, value) SELECT g, 'dev' || g, g FROM generate_series(0,30) g; +CREATE TABLE metrics_timestamp(time timestamp); +SELECT create_hypertable('metrics_timestamp','time'); +psql:include/plan_expand_hypertable_load.sql:62: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +psql:include/plan_expand_hypertable_load.sql:62: NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------------------- + (5,public,metrics_timestamp,t) +(1 row) + +INSERT INTO metrics_timestamp SELECT generate_series('2000-01-01'::timestamp,'2000-02-01'::timestamp,'1d'::interval); +CREATE TABLE metrics_timestamptz(time timestamptz, device_id int); +SELECT create_hypertable('metrics_timestamptz','time'); +psql:include/plan_expand_hypertable_load.sql:66: NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------- + (6,public,metrics_timestamptz,t) +(1 row) + +INSERT INTO metrics_timestamptz SELECT generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval), 1; +INSERT INTO metrics_timestamptz SELECT generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval), 2; +INSERT INTO metrics_timestamptz SELECT generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval), 3; +--create a second table to test joins with +CREATE TABLE metrics_timestamptz_2 (LIKE metrics_timestamptz); +SELECT create_hypertable('metrics_timestamptz_2','time'); + create_hypertable +------------------------------------ + (7,public,metrics_timestamptz_2,t) +(1 row) + +INSERT INTO metrics_timestamptz_2 +SELECT * FROM metrics_timestamptz; +INSERT INTO metrics_timestamptz_2 VALUES ('2000-12-01'::timestamptz, 3); +CREATE TABLE metrics_date(time date); +SELECT create_hypertable('metrics_date','time'); +psql:include/plan_expand_hypertable_load.sql:79: NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------- + (8,public,metrics_date,t) +(1 row) + +INSERT INTO metrics_date SELECT generate_series('2000-01-01'::date,'2000-02-01'::date,'1d'::interval); +ANALYZE hyper; +ANALYZE hyper_w_space; +ANALYZE tag; +ANALYZE hyper_ts; +ANALYZE hyper_timefunc; +-- create normal table for JOIN tests +CREATE TABLE regular_timestamptz(time timestamptz); +INSERT INTO regular_timestamptz SELECT generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval); +\ir include/plan_expand_hypertable_query.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+--we want to see how our logic excludes chunks +--and not how much work constraint_exclusion does +SET constraint_exclusion = 'off'; +\qecho test upper bounds +test upper bounds +:PREFIX SELECT * FROM hyper WHERE time < 10 ORDER BY value; + QUERY PLAN +------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time < 11 ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 11) + -> Seq Scan on _hyper_1_2_chunk + Filter: ("time" < 11) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time = 10 ORDER BY value; + QUERY PLAN +------------------------------------ + Sort + Sort Key: _hyper_1_2_chunk.value + -> Seq Scan on _hyper_1_2_chunk + Filter: ("time" = 10) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE 10 >= time ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (10 >= "time") + -> Seq Scan on _hyper_1_2_chunk + Filter: (10 >= "time") +(7 rows) + +\qecho test lower bounds +test lower bounds +:PREFIX SELECT * FROM hyper WHERE time >= 10 and time < 20 ORDER BY value; + QUERY PLAN +---------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk.value + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" >= 10) AND ("time" < 20)) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE 10 < time and 20 >= time ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk.value + -> Append + -> Seq Scan on _hyper_1_2_chunk + Filter: ((10 < "time") AND (20 >= "time")) + -> Seq Scan on _hyper_1_3_chunk + Filter: ((10 < "time") AND (20 >= "time")) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time >= 9 and time < 20 ORDER BY value; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 9) AND ("time" < 20)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" >= 9) AND ("time" < 20)) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time > 9 and time < 20 ORDER BY value; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk.value + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 9) AND ("time" < 20)) +(4 rows) + +\qecho test empty result +test empty result +:PREFIX SELECT * FROM hyper WHERE time < 0; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +\qecho test expression evaluation +test expression evaluation +:PREFIX SELECT * FROM hyper WHERE time < (5*2)::smallint; + QUERY PLAN +------------------------------------- + Seq Scan on _hyper_1_1_chunk + Filter: ("time" < '10'::smallint) +(2 rows) + +\qecho test logic at INT64_MAX +test logic at INT64_MAX +:PREFIX SELECT * FROM hyper WHERE time = 9223372036854775807::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_1_102_chunk.value + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" = '9223372036854775807'::bigint) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time = 9223372036854775806::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_1_102_chunk.value 
+ -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" = '9223372036854775806'::bigint) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time >= 9223372036854775807::bigint ORDER BY value; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: _hyper_1_102_chunk.value + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" >= '9223372036854775807'::bigint) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time > 9223372036854775807::bigint ORDER BY value; + QUERY PLAN +-------------------------------- + Sort + Sort Key: value + -> Result + One-Time Filter: false +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time > 9223372036854775806::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_1_102_chunk.value + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" > '9223372036854775806'::bigint) +(4 rows) + +\qecho cte +cte +:PREFIX WITH cte AS( + SELECT * FROM hyper WHERE time < 10 +) +SELECT * FROM cte ORDER BY value; + QUERY PLAN +------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) +(4 rows) + +\qecho subquery +subquery +:PREFIX SELECT 0 = ANY (SELECT value FROM hyper WHERE time < 10); + QUERY PLAN +-------------------------------------- + Result + SubPlan 1 + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) +(4 rows) + +\qecho no space constraint +no space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 ORDER BY value; + QUERY PLAN +-------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < 10) +(11 rows) + +\qecho valid space constraint +valid space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and device_id = 'dev5' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and 'dev5' = device_id ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND ('dev5'::text = device_id)) +(4 rows) + +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and 'dev'||(2+3) = device_id ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND ('dev5'::text = device_id)) +(4 rows) + +\qecho only space constraint +only space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE 'dev5' = device_id ORDER BY value; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Append + -> Seq Scan on _hyper_2_106_chunk + Filter: ('dev5'::text = device_id) + -> Seq Scan on _hyper_2_109_chunk + Filter: ('dev5'::text = device_id) + -> Seq Scan on _hyper_2_111_chunk + Filter: ('dev5'::text = device_id) +(9 rows) + +\qecho unhandled space constraint +unhandled space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and device_id > 
'dev5' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) +(11 rows) + +\qecho use of OR - does not filter chunks +use of OR - does not filter chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND (device_id = 'dev5' or device_id = 'dev6') ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) +(11 rows) + +\qecho cte +cte +:PREFIX WITH cte AS( + SELECT * FROM hyper_w_space WHERE time < 10 and device_id = 'dev5' +) +SELECT * FROM cte ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho subquery +subquery +:PREFIX SELECT 0 = ANY (SELECT value FROM hyper_w_space WHERE time < 10 and device_id = 'dev5'); + QUERY PLAN +------------------------------------------------------------------ + Result + SubPlan 1 + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho view +view +:PREFIX SELECT * FROM hyper_w_space_view WHERE time < 10 and device_id = 'dev5' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho IN statement - simple +IN statement - simple +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id IN ('dev5') ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho IN statement - two chunks +IN statement - two chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id IN ('dev5','dev6') ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_105_chunk.value + -> Append + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[]))) +(7 rows) + +\qecho IN statement - one chunk +IN statement - one chunk +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id IN ('dev4','dev5') ORDER 
BY value; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev4,dev5}'::text[]))) +(4 rows) + +\qecho NOT IN - does not filter chunks +NOT IN - does not filter chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id NOT IN ('dev5','dev6') ORDER BY value; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND (device_id <> ALL ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND (device_id <> ALL ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id <> ALL ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id <> ALL ('{dev5,dev6}'::text[]))) +(11 rows) + +\qecho IN statement with subquery - does not filter chunks +IN statement with subquery - does not filter chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id IN (SELECT 'dev5'::text) ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho ANY +ANY +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id = ANY(ARRAY['dev5','dev6']) ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_105_chunk.value + -> Append + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[]))) +(7 rows) + +\qecho ANY with intersection +ANY with intersection +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id = ANY(ARRAY['dev5','dev6']) AND device_id = ANY(ARRAY['dev6','dev7']) ORDER BY value; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_105_chunk.value + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[])) AND (device_id = ANY ('{dev6,dev7}'::text[]))) +(4 rows) + +\qecho ANY without intersection shouldnt scan any chunks +ANY without intersection shouldnt scan any chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id = ANY(ARRAY['dev5','dev6']) AND device_id = ANY(ARRAY['dev8','dev9']) ORDER BY value; + QUERY PLAN +-------------------------------- + Sort + Sort Key: value + -> Result + One-Time Filter: false +(4 rows) + +\qecho ANY/IN/ALL only works for equals operator +ANY/IN/ALL only works for equals operator +:PREFIX SELECT * FROM hyper_w_space WHERE device_id < ANY(ARRAY['dev5','dev6']) ORDER BY value; + QUERY PLAN +----------------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq 
Scan on _hyper_2_106_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_107_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_108_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_109_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_110_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_111_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_112_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_113_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_114_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_115_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) +(29 rows) + +\qecho ALL with equals and different values shouldnt scan any chunks +ALL with equals and different values shouldnt scan any chunks +:PREFIX SELECT * FROM hyper_w_space WHERE device_id = ALL(ARRAY['dev5','dev6']) ORDER BY value; + QUERY PLAN +-------------------------------- + Sort + Sort Key: value + -> Result + One-Time Filter: false +(4 rows) + +\qecho Multi AND +Multi AND +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND time < 100 ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND ("time" < 100)) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND ("time" < 100)) + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND ("time" < 100)) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND ("time" < 100)) +(11 rows) + +\qecho Time dimension doesnt filter chunks when using IN/ANY with multiple arguments +Time dimension doesnt filter chunks when using IN/ANY with multiple arguments +:PREFIX SELECT * FROM hyper_w_space WHERE time < ANY(ARRAY[1,2]) ORDER BY value; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_107_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_108_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_109_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_110_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_111_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_112_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_113_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_114_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_115_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) +(29 rows) + +\qecho Time dimension chunk filtering works for ANY with single argument +Time dimension chunk filtering works for ANY with single argument +:PREFIX SELECT * FROM hyper_w_space WHERE time < ANY(ARRAY[1]) ORDER BY value; + QUERY PLAN 
+--------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < ANY ('{1}'::integer[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < ANY ('{1}'::integer[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < ANY ('{1}'::integer[])) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < ANY ('{1}'::integer[])) +(11 rows) + +\qecho Time dimension chunk filtering works for ALL with single argument +Time dimension chunk filtering works for ALL with single argument +:PREFIX SELECT * FROM hyper_w_space WHERE time < ALL(ARRAY[1]) ORDER BY value; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < ALL ('{1}'::integer[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < ALL ('{1}'::integer[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < ALL ('{1}'::integer[])) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < ALL ('{1}'::integer[])) +(11 rows) + +\qecho Time dimension chunk filtering works for ALL with multiple arguments +Time dimension chunk filtering works for ALL with multiple arguments +:PREFIX SELECT * FROM hyper_w_space WHERE time < ALL(ARRAY[1,10,20,30]) ORDER BY value; + QUERY PLAN +------------------------------------------------------------------ + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < ALL ('{1,10,20,30}'::integer[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < ALL ('{1,10,20,30}'::integer[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < ALL ('{1,10,20,30}'::integer[])) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < ALL ('{1,10,20,30}'::integer[])) +(11 rows) + +\qecho AND intersection using IN and EQUALS +AND intersection using IN and EQUALS +:PREFIX SELECT * FROM hyper_w_space WHERE device_id IN ('dev1','dev2') AND device_id = 'dev1' ORDER BY value; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ((device_id = ANY ('{dev1,dev2}'::text[])) AND (device_id = 'dev1'::text)) + -> Seq Scan on _hyper_2_110_chunk + Filter: ((device_id = ANY ('{dev1,dev2}'::text[])) AND (device_id = 'dev1'::text)) + -> Seq Scan on _hyper_2_114_chunk + Filter: ((device_id = ANY ('{dev1,dev2}'::text[])) AND (device_id = 'dev1'::text)) +(9 rows) + +\qecho AND with no intersection using IN and EQUALS +AND with no intersection using IN and EQUALS +:PREFIX SELECT * FROM hyper_w_space WHERE device_id IN ('dev1','dev2') AND device_id = 'dev3' ORDER BY value; + QUERY PLAN +-------------------------------- + Sort + Sort Key: value + -> Result + One-Time Filter: false +(4 rows) + +\qecho timestamps +timestamps +\qecho these should work since they are immutable functions +these should work since they are immutable functions +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969 PST'::timestamptz ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Append + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 
PST'::timestamp with time zone) +(7 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < to_timestamp(10) ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Append + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) +(7 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969'::timestamp AT TIME ZONE 'PST' ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Append + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) +(7 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(4 rows) + +\qecho these should not work since uses stable functions; +these should not work since uses stable functions; +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969'::timestamp ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------ + Sort + Sort Key: hyper_ts.value + -> Custom Scan (ChunkAppend) on hyper_ts + Chunks excluded during startup: 6 + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) +(8 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < ('Wed Dec 31 16:00:10 1969'::timestamp::timestamptz) ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Custom Scan (ChunkAppend) on hyper_ts + Chunks excluded during startup: 6 + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) +(8 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE NOW() < time ORDER BY value; + QUERY PLAN +--------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Custom Scan (ChunkAppend) on hyper_ts + Chunks excluded during startup: 7 + -> Seq Scan on _hyper_3_123_chunk + Filter: (now() < "time") +(6 rows) + +\qecho joins +joins +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.id=1) and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Nested Loop Semi Join + -> Seq Scan on _hyper_3_116_chunk + 
Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text) AND (tag_id = 1)) + -> Seq Scan on tag + Filter: (id = 1) +(7 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.id=1) or (time < to_timestamp(10) and device_id = 'dev1') ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Custom Scan (ChunkAppend) on hyper_ts + -> Seq Scan on _hyper_3_116_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + SubPlan 1 + -> Seq Scan on tag + Filter: (id = 1) + -> Seq Scan on _hyper_3_117_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_118_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_119_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_120_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_121_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_122_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_123_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) +(22 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.name='tag1') and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Nested Loop + Join Filter: (_hyper_3_116_chunk.tag_id = tag.id) + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) + -> Seq Scan on tag + Filter: (name = 'tag1'::text) +(8 rows) + +:PREFIX SELECT * FROM hyper_ts JOIN tag on (hyper_ts.tag_id = tag.id ) WHERE time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Merge Join + Merge Cond: (tag.id = _hyper_3_116_chunk.tag_id) + -> Index Scan using tag_pkey on tag + -> Sort + Sort Key: _hyper_3_116_chunk.tag_id + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(9 rows) + +:PREFIX SELECT * FROM hyper_ts JOIN tag on (hyper_ts.tag_id = tag.id ) WHERE tag.name = 'tag1' and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Nested Loop + Join Filter: (_hyper_3_116_chunk.tag_id = tag.id) + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) + -> Seq Scan on tag + Filter: (name = 'tag1'::text) +(8 rows) + +\qecho test constraint exclusion for constraints in ON clause of JOINs +test constraint exclusion for constraints in ON clause of JOINs +\qecho should exclude chunks on m1 and propagate qual to m2 because of INNER JOIN +should exclude chunks on m1 and propagate qual to m2 because of INNER JOIN +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +\qecho should exclude chunks on m2 and propagate qual to m1 because of INNER JOIN +should exclude chunks on m2 and propagate qual to m1 because of INNER JOIN +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +\qecho must not 
exclude on m1 +must not exclude on m1 +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + Join Filter: (m1."time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(19 rows) + +\qecho should exclude chunks on m2 +should exclude chunks on m2 +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + 
Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(24 rows) + +\qecho should exclude chunks on m1 +should exclude chunks on m1 +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: m1."time" + -> Merge Right Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(25 rows) + +\qecho must not exclude chunks on m2 +must not exclude chunks on m2 +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: m1."time" + -> Merge Left Join + Merge Cond: (m2."time" = m1."time") + Join Filter: (m2."time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (ChunkAppend) on 
metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 +(21 rows) + +\qecho time_bucket exclusion +time_bucket exclusion +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) < 10::bigint ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < '10'::bigint) AND (time_bucket('10'::bigint, "time") < '10'::bigint)) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) < 11::bigint ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < '21'::bigint) AND (time_bucket('10'::bigint, "time") < '11'::bigint)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" < '21'::bigint) AND (time_bucket('10'::bigint, "time") < '11'::bigint)) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" < '21'::bigint) AND (time_bucket('10'::bigint, "time") < '11'::bigint)) +(9 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) <= 10::bigint ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" <= '20'::bigint) AND (time_bucket('10'::bigint, "time") <= '10'::bigint)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" <= '20'::bigint) AND (time_bucket('10'::bigint, "time") <= '10'::bigint)) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" <= '20'::bigint) AND (time_bucket('10'::bigint, "time") <= '10'::bigint)) +(9 rows) + +:PREFIX SELECT * FROM hyper WHERE 10::bigint > time_bucket(10, time) ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < '10'::bigint) AND ('10'::bigint > time_bucket('10'::bigint, "time"))) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE 11::bigint > 
time_bucket(10, time) ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < '21'::bigint) AND ('11'::bigint > time_bucket('10'::bigint, "time"))) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" < '21'::bigint) AND ('11'::bigint > time_bucket('10'::bigint, "time"))) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" < '21'::bigint) AND ('11'::bigint > time_bucket('10'::bigint, "time"))) +(9 rows) + +\qecho test overflow behaviour of time_bucket exclusion +test overflow behaviour of time_bucket exclusion +:PREFIX SELECT * FROM hyper WHERE time > 950 AND time_bucket(10, time) < '9223372036854775807'::bigint ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_96_chunk."time" + -> Append + -> Seq Scan on _hyper_1_96_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_97_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_98_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_99_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_100_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_101_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_102_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) +(17 rows) + +\qecho test timestamp upper boundary +test timestamp upper boundary +\qecho there should be no transformation if we are out of the supported (TimescaleDB-specific) range +there should be no transformation if we are out of the supported (TimescaleDB-specific) range +:PREFIX SELECT * FROM metrics_timestamp WHERE time_bucket('1d',time) < '294276-01-01'::timestamp ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on metrics_timestamp + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_5_155_chunk_metrics_timestamp_time_idx on _hyper_5_155_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_156_chunk_metrics_timestamp_time_idx on _hyper_5_156_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_157_chunk_metrics_timestamp_time_idx on _hyper_5_157_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_158_chunk_metrics_timestamp_time_idx on _hyper_5_158_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_159_chunk_metrics_timestamp_time_idx on _hyper_5_159_chunk + Filter: 
(time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) +(12 rows) + +\qecho transformation would be out of range +transformation would be out of range +:PREFIX SELECT * FROM metrics_timestamp WHERE time_bucket('1000d',time) < '294276-01-01'::timestamp ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_5_155_chunk_metrics_timestamp_time_idx on _hyper_5_155_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_156_chunk_metrics_timestamp_time_idx on _hyper_5_156_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_157_chunk_metrics_timestamp_time_idx on _hyper_5_157_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_158_chunk_metrics_timestamp_time_idx on _hyper_5_158_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_159_chunk_metrics_timestamp_time_idx on _hyper_5_159_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) +(12 rows) + +\qecho test timestamptz upper boundary +test timestamptz upper boundary +\qecho there should be no transformation if we are out of the supported (TimescaleDB-specific) range +there should be no transformation if we are out of the supported (TimescaleDB-specific) range +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1d',time) < '294276-01-01'::timestamptz ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) +(12 rows) + +\qecho transformation would be out of range +transformation would be out of range +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1000d',time) < 
'294276-01-01'::timestamptz ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) +(12 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) > 10 AND time_bucket(10, time) < 100 ORDER BY time; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk."time" + -> Append + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_4_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_5_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_6_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_7_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_8_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_9_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_10_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) +(21 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) > 10 AND time_bucket(10, time) < 20 ORDER BY time; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk."time" + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 10) AND ("time" < '20'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 20)) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(1, time) > 11 AND time_bucket(1, time) < 19 ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk."time" + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 11) AND ("time" < '19'::bigint) AND (time_bucket('1'::bigint, "time") > 11) AND (time_bucket('1'::bigint, "time") < 19)) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE 10 < time_bucket(10, time) AND 20 > time_bucket(10,time) ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk."time" + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 10) AND ("time" < '20'::bigint) AND (10 < time_bucket('10'::bigint, "time")) AND (20 > time_bucket('10'::bigint, "time"))) +(4 rows) + +\qecho time_bucket exclusion with date +time_bucket exclusion with date +:PREFIX SELECT * FROM metrics_date WHERE time_bucket('1d',time) < '2000-01-03' ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Index Only Scan Backward using _hyper_8_171_chunk_metrics_date_time_idx on _hyper_8_171_chunk + Index Cond: ("time" < '01-03-2000'::date) + Filter: (time_bucket('@ 1 day'::interval, "time") < '01-03-2000'::date) +(3 rows) + +:PREFIX SELECT * FROM metrics_date WHERE time_bucket('1d',time) >= '2000-01-03' AND time_bucket('1d',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_date + Order: metrics_date."time" + -> Index Only Scan Backward using _hyper_8_171_chunk_metrics_date_time_idx on _hyper_8_171_chunk + Index Cond: (("time" >= '01-03-2000'::date) AND ("time" <= '01-11-2000'::date)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= '01-03-2000'::date) AND (time_bucket('@ 1 day'::interval, "time") <= '01-10-2000'::date)) + -> Index Only Scan Backward using _hyper_8_172_chunk_metrics_date_time_idx on _hyper_8_172_chunk + Index Cond: (("time" >= '01-03-2000'::date) AND ("time" <= '01-11-2000'::date)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= '01-03-2000'::date) AND (time_bucket('@ 1 day'::interval, "time") <= '01-10-2000'::date)) +(8 rows) + +\qecho time_bucket exclusion with timestamp +time_bucket exclusion with timestamp +:PREFIX SELECT * FROM metrics_timestamp WHERE time_bucket('1d',time) < '2000-01-03' ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Index Only Scan Backward using _hyper_5_155_chunk_metrics_timestamp_time_idx on _hyper_5_155_chunk + Index Cond: ("time" < 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) +(3 
rows) + +:PREFIX SELECT * FROM metrics_timestamp WHERE time_bucket('1d',time) >= '2000-01-03' AND time_bucket('1d',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_5_155_chunk_metrics_timestamp_time_idx on _hyper_5_155_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) AND ("time" <= 'Tue Jan 11 00:00:00 2000'::timestamp without time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) AND (time_bucket('@ 1 day'::interval, "time") <= 'Mon Jan 10 00:00:00 2000'::timestamp without time zone)) + -> Index Only Scan Backward using _hyper_5_156_chunk_metrics_timestamp_time_idx on _hyper_5_156_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) AND ("time" <= 'Tue Jan 11 00:00:00 2000'::timestamp without time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) AND (time_bucket('@ 1 day'::interval, "time") <= 'Mon Jan 10 00:00:00 2000'::timestamp without time zone)) +(8 rows) + +\qecho time_bucket exclusion with timestamptz +time_bucket exclusion with timestamptz +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('6h',time) < '2000-01-03' ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: ("time" < 'Mon Jan 03 06:00:00 2000 PST'::timestamp with time zone) + Filter: (time_bucket('@ 6 hours'::interval, "time") < 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) +(3 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('6h',time) >= '2000-01-03' AND time_bucket('6h',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 10 06:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 6 hours'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 6 hours'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 10 06:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 6 hours'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 6 hours'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(8 
rows) + +\qecho time_bucket exclusion with timestamptz and day interval +time_bucket exclusion with timestamptz and day interval +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1d',time) < '2000-01-03' ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: ("time" < 'Tue Jan 04 00:00:00 2000 PST'::timestamp with time zone) + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) +(3 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1d',time) >= '2000-01-03' AND time_bucket('1d',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Tue Jan 11 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 1 day'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Tue Jan 11 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 1 day'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(8 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1d',time) >= '2000-01-03' AND time_bucket('7d',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 17 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 7 days'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 17 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 7 days'::interval, "time") <= 'Mon Jan 10 00:00:00 
2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 17 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 7 days'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(11 rows) + +\qecho no transformation +no transformation +:PREFIX SELECT * FROM hyper WHERE time_bucket(10 + floor(random())::int, time) > 10 AND time_bucket(10 + floor(random())::int, time) < 100 AND time < 150 ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper."time" + -> Custom Scan (ChunkAppend) on hyper + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_4_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_5_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_6_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_7_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_8_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_9_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_10_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_11_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_12_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, 
"time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_13_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_14_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_15_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) +(34 rows) + +\qecho exclude chunks based on time column with partitioning function. This +exclude chunks based on time column with partitioning function. This +\qecho transparently applies the time partitioning function on the time +transparently applies the time partitioning function on the time +\qecho value to be able to exclude chunks (similar to a closed dimension). +value to be able to exclude chunks (similar to a closed dimension). +:PREFIX SELECT * FROM hyper_timefunc WHERE time < 4 ORDER BY value; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: _hyper_4_124_chunk.value + -> Append + -> Seq Scan on _hyper_4_124_chunk + Filter: ("time" < '4'::double precision) + -> Seq Scan on _hyper_4_125_chunk + Filter: ("time" < '4'::double precision) + -> Seq Scan on _hyper_4_126_chunk + Filter: ("time" < '4'::double precision) + -> Seq Scan on _hyper_4_127_chunk + Filter: ("time" < '4'::double precision) +(11 rows) + +\qecho excluding based on time expression is currently unoptimized +excluding based on time expression is currently unoptimized +:PREFIX SELECT * FROM hyper_timefunc WHERE unix_to_timestamp(time) < 'Wed Dec 31 16:00:04 1969 PST' ORDER BY value; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_4_124_chunk.value + -> Append + -> Seq Scan on _hyper_4_124_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_125_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_126_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_127_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_128_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_129_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_130_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_131_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_132_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_133_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_134_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on 
_hyper_4_135_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_136_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_137_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_138_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_139_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_140_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_141_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_142_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_143_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_144_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_145_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_146_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_147_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_148_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_149_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_150_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_151_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_152_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_153_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_154_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) +(65 rows) + +\qecho test qual propagation for joins +test qual propagation for joins +RESET constraint_exclusion; +\qecho nothing to propagate +nothing to propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1, metrics_timestamptz_2 m2 WHERE m1.time = m2.time ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index 
Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(18 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(18 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on 
_hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(18 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: m1."time" + -> Merge Right Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(20 rows) + +\qecho OR constraints should not propagate +OR constraints should not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' OR m1.time > '2001-01-01' ORDER BY m1.time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Filter: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) OR ("time" > 'Mon Jan 01 00:00:00 2001 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Filter: (("time" < 
'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) OR ("time" > 'Mon Jan 01 00:00:00 2001 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(17 rows) + +\qecho test single constraint +test single constraint +\qecho constraint should be on both scans +constraint should be on both scans +\qecho these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +:PREFIX SELECT m1.time FROM metrics_timestamptz m1, metrics_timestamptz_2 m2 WHERE m1.time = m2.time AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 
m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(17 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +\qecho test 2 constraints on single relation +test 2 constraints on single relation +\qecho these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL 
condition on the JOIN column +these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +:PREFIX SELECT m1.time FROM metrics_timestamptz m1, metrics_timestamptz_2 m2 WHERE m1.time = m2.time AND m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT 
m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Append + -> Index Only Scan using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + Index Cond: ("time" = m1."time") +(20 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +\qecho test 2 constraints with 
1 constraint on each relation +test 2 constraints with 1 constraint on each relation +\qecho these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +:PREFIX SELECT m1.time FROM metrics_timestamptz m1, metrics_timestamptz_2 m2 WHERE m1.time = m2.time AND m1.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using 
_hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 
2000 PST'::timestamp with time zone)) +(15 rows) + +\qecho test constraints in ON clause of INNER JOIN +test constraints in ON clause of INNER JOIN +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +\qecho test constraints in ON clause of LEFT JOIN +test constraints in ON clause of LEFT JOIN +\qecho must not propagate +must not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 
PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(16 rows) + +\qecho test constraints in ON clause of RIGHT JOIN +test constraints in ON clause of RIGHT JOIN +\qecho must not propagate +must not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: m1_1."time" + -> Parallel Hash Left Join + Hash Cond: (m2_1."time" = m1_1."time") + Join Filter: ((m2_1."time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND (m2_1."time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Parallel Append + -> Parallel Seq Scan on _hyper_7_165_chunk m2_1 + -> Parallel Seq Scan on _hyper_7_166_chunk m2_2 + -> Parallel Seq Scan on _hyper_7_167_chunk m2_3 + -> Parallel Seq Scan on _hyper_7_168_chunk m2_4 + -> Parallel Seq Scan on _hyper_7_169_chunk m2_5 + -> Parallel Seq Scan on _hyper_7_170_chunk m2_6 + -> Parallel Hash + -> Parallel Append + -> Parallel Seq Scan on _hyper_6_160_chunk m1_1 + -> Parallel Seq Scan on _hyper_6_161_chunk m1_2 + -> Parallel Seq Scan on _hyper_6_162_chunk m1_3 + -> Parallel Seq Scan on _hyper_6_163_chunk m1_4 + -> Parallel Seq Scan on _hyper_6_164_chunk m1_5 +(21 rows) + +\qecho test equality condition not in ON clause +test equality condition not in ON clause +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON true WHERE m2.time = m1.time AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +\qecho test constraints not joined on +test constraints not joined on +\qecho device_id constraint must not propagate +device_id constraint must not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON true WHERE m2.time = m1.time AND m2.time < '2000-01-10' AND m1.device_id = 1 ORDER BY m1.time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Nested Loop + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + 
-> Index Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Append + -> Index Only Scan using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" = m1."time") AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" = m1."time") AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(14 rows) + +\qecho test multiple join conditions +test multiple join conditions +\qecho device_id constraint should propagate +device_id constraint should propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON true WHERE m2.time = m1.time AND m1.device_id = m2.device_id AND m2.time < '2000-01-10' AND m1.device_id = 1 ORDER BY m1.time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Nested Loop + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Append + -> Index Scan using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" = m1."time") AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Filter: (device_id = 1) + -> Index Scan using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" = m1."time") AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Filter: (device_id = 1) +(16 rows) + +\qecho test join with 3 tables +test join with 3 tables +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time INNER JOIN metrics_timestamptz m3 ON m2.time=m3.time WHERE m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + -> Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with 
time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Append + -> Index Only Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m3_1 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m3_2 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m3_3 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m3_4 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m3_5 + Index Cond: ("time" = m1."time") +(27 rows) + +\qecho test non-Const constraints +test non-Const constraints +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10'::text::timestamptz ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + Chunks excluded during startup: 3 + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + Chunks excluded during startup: 4 + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) +(17 rows) + +\qecho test now() +test now() +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < now() ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + Chunks excluded during startup: 0 + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 
now()) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + Index Cond: ("time" < now()) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + Chunks excluded during startup: 0 + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + Index Cond: ("time" < now()) +(31 rows) + +\qecho test volatile function +test volatile function +\qecho should not propagate +should not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < clock_timestamp() ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + Chunks excluded during startup: 0 + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + Filter: ("time" < clock_timestamp()) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(24 rows) + 
+:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m2.time < clock_timestamp() ORDER BY m1.time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m2."time" = m1."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + Chunks excluded during startup: 0 + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + Filter: ("time" < clock_timestamp()) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 +(25 rows) + +\qecho test JOINs with normal table +test JOINs with normal table +\qecho will not propagate because constraints are only added to hypertables +will not propagate because constraints are only added to hypertables +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN regular_timestamptz m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Sort + Sort Key: m2."time" + -> Seq Scan on regular_timestamptz m2 +(11 rows) + +\qecho test JOINs with normal table +test JOINs with normal table +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN regular_timestamptz m2 ON m1.time = m2.time WHERE m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> 
Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Sort + Sort Key: m2."time" + -> Seq Scan on regular_timestamptz m2 + Filter: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(12 rows) + +\qecho test quals are not pushed into OUTER JOIN +test quals are not pushed into OUTER JOIN +CREATE TABLE outer_join_1 (id int, name text,time timestamptz NOT NULL DEFAULT '2000-01-01'); +CREATE TABLE outer_join_2 (id int, name text,time timestamptz NOT NULL DEFAULT '2000-01-01'); +SELECT (SELECT table_name FROM create_hypertable(tbl, 'time')) FROM (VALUES ('outer_join_1'),('outer_join_2')) v(tbl); + table_name +-------------- + outer_join_1 + outer_join_2 +(2 rows) + +INSERT INTO outer_join_1 VALUES(1,'a'), (2,'b'); +INSERT INTO outer_join_2 VALUES(1,'a'); +:PREFIX SELECT one.id, two.name FROM outer_join_1 one LEFT OUTER JOIN outer_join_2 two ON one.id=two.id WHERE one.id=2; + QUERY PLAN +------------------------------------------------- + Nested Loop Left Join + Join Filter: (one.id = two.id) + -> Seq Scan on _hyper_9_176_chunk one + Filter: (id = 2) + -> Materialize + -> Seq Scan on _hyper_10_177_chunk two + Filter: (id = 2) +(7 rows) + +:PREFIX SELECT one.id, two.name FROM outer_join_2 two RIGHT OUTER JOIN outer_join_1 one ON one.id=two.id WHERE one.id=2; + QUERY PLAN +------------------------------------------------- + Nested Loop Left Join + Join Filter: (one.id = two.id) + -> Seq Scan on _hyper_9_176_chunk one + Filter: (id = 2) + -> Materialize + -> Seq Scan on _hyper_10_177_chunk two + Filter: (id = 2) +(7 rows) + +DROP TABLE outer_join_1; +DROP TABLE outer_join_2; +-- test UNION between regular table and hypertable +SELECT time FROM regular_timestamptz UNION SELECT time FROM metrics_timestamptz ORDER BY 1; + time +------------------------------ + Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sun Jan 30 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST +(32 rows) + +-- test UNION ALL between regular table and hypertable +SELECT time FROM regular_timestamptz UNION ALL SELECT time FROM metrics_timestamptz ORDER BY 1; + time +------------------------------ + Sat Jan 01 00:00:00 2000 PST + Sat Jan 01 00:00:00 2000 PST + Sat Jan 01 00:00:00 2000 PST + Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST 
+ Sun Jan 02 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sun Jan 30 00:00:00 2000 PST + Sun Jan 30 00:00:00 2000 PST + Sun Jan 30 00:00:00 2000 PST + Sun Jan 30 
00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST +(128 rows) + +-- test nested join qual propagation +:PREFIX +SELECT * FROM ( +SELECT o1_m1.time FROM metrics_timestamptz o1_m1 INNER JOIN metrics_timestamptz_2 o1_m2 ON true WHERE o1_m2.time = o1_m1.time AND o1_m1.device_id = o1_m2.device_id AND o1_m2.time < '2000-01-10' AND o1_m1.device_id = 1 +) o1 FULL OUTER JOIN ( +SELECT o2_m1.time FROM metrics_timestamptz o2_m1 FULL OUTER JOIN metrics_timestamptz_2 o2_m2 ON true WHERE o2_m2.time = o2_m1.time AND o2_m1.device_id = o2_m2.device_id AND o2_m2.time > '2000-01-20' AND o2_m1.device_id = 2 +) o2 ON o1.time = o2.time ORDER BY 1,2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: o1_m1_1."time", o2_m1_1."time" + -> Merge Full Join + Merge Cond: (o2_m1_1."time" = o1_m1_1."time") + -> Nested Loop + -> Merge Append + Sort Key: o2_m2_1."time" + -> Index Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk o2_m2_1 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk o2_m2_2 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk o2_m2_3 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk o2_m1_1 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk o2_m1_2 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk o2_m1_3 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk o2_m1_4 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk o2_m1_5 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Materialize + -> Nested Loop + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 o1_m2 + Order: o1_m2."time" + -> Index Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk o1_m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk o1_m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk o1_m1_1 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk o1_m1_2 + Index Cond: ("time" = o1_m2."time") 
+ Filter: (device_id = 1) + -> Index Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk o1_m1_3 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk o1_m1_4 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk o1_m1_5 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) +(58 rows) + +:PREFIX +SELECT * FROM ( +SELECT o1_m1.time FROM metrics_timestamptz o1_m1 INNER JOIN metrics_timestamptz_2 o1_m2 ON o1_m2.time = o1_m1.time AND o1_m1.device_id = o1_m2.device_id WHERE o1_m2.time < '2000-01-10' AND o1_m1.device_id = 1 +) o1 FULL OUTER JOIN ( +SELECT o2_m1.time FROM metrics_timestamptz o2_m1 FULL OUTER JOIN metrics_timestamptz_2 o2_m2 ON o2_m2.time = o2_m1.time AND o2_m1.device_id = o2_m2.device_id WHERE o2_m2.time > '2000-01-20' AND o2_m1.device_id = 2 +) o2 ON o1.time = o2.time ORDER BY 1,2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: o1_m1_1."time", o2_m1_1."time" + -> Merge Full Join + Merge Cond: (o2_m1_1."time" = o1_m1_1."time") + -> Nested Loop + -> Merge Append + Sort Key: o2_m2_1."time" + -> Index Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk o2_m2_1 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk o2_m2_2 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk o2_m2_3 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk o2_m1_1 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk o2_m1_2 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk o2_m1_3 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk o2_m1_4 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk o2_m1_5 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Materialize + -> Nested Loop + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 o1_m2 + Order: o1_m2."time" + -> Index Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk o1_m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk o1_m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk o1_m1_1 + Index 
Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk o1_m1_2 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk o1_m1_3 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk o1_m1_4 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk o1_m1_5 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) +(58 rows) + +\ir include/plan_expand_hypertable_chunks_in_query.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +--we want to see how our logic excludes chunks +--and not how much work constraint_exclusion does +SET constraint_exclusion = 'off'; +:PREFIX SELECT * FROM hyper ORDER BY value; + QUERY PLAN +-------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + -> Seq Scan on _hyper_1_2_chunk + -> Seq Scan on _hyper_1_3_chunk + -> Seq Scan on _hyper_1_4_chunk + -> Seq Scan on _hyper_1_5_chunk + -> Seq Scan on _hyper_1_6_chunk + -> Seq Scan on _hyper_1_7_chunk + -> Seq Scan on _hyper_1_8_chunk + -> Seq Scan on _hyper_1_9_chunk + -> Seq Scan on _hyper_1_10_chunk + -> Seq Scan on _hyper_1_11_chunk + -> Seq Scan on _hyper_1_12_chunk + -> Seq Scan on _hyper_1_13_chunk + -> Seq Scan on _hyper_1_14_chunk + -> Seq Scan on _hyper_1_15_chunk + -> Seq Scan on _hyper_1_16_chunk + -> Seq Scan on _hyper_1_17_chunk + -> Seq Scan on _hyper_1_18_chunk + -> Seq Scan on _hyper_1_19_chunk + -> Seq Scan on _hyper_1_20_chunk + -> Seq Scan on _hyper_1_21_chunk + -> Seq Scan on _hyper_1_22_chunk + -> Seq Scan on _hyper_1_23_chunk + -> Seq Scan on _hyper_1_24_chunk + -> Seq Scan on _hyper_1_25_chunk + -> Seq Scan on _hyper_1_26_chunk + -> Seq Scan on _hyper_1_27_chunk + -> Seq Scan on _hyper_1_28_chunk + -> Seq Scan on _hyper_1_29_chunk + -> Seq Scan on _hyper_1_30_chunk + -> Seq Scan on _hyper_1_31_chunk + -> Seq Scan on _hyper_1_32_chunk + -> Seq Scan on _hyper_1_33_chunk + -> Seq Scan on _hyper_1_34_chunk + -> Seq Scan on _hyper_1_35_chunk + -> Seq Scan on _hyper_1_36_chunk + -> Seq Scan on _hyper_1_37_chunk + -> Seq Scan on _hyper_1_38_chunk + -> Seq Scan on _hyper_1_39_chunk + -> Seq Scan on _hyper_1_40_chunk + -> Seq Scan on _hyper_1_41_chunk + -> Seq Scan on _hyper_1_42_chunk + -> Seq Scan on _hyper_1_43_chunk + -> Seq Scan on _hyper_1_44_chunk + -> Seq Scan on _hyper_1_45_chunk + -> Seq Scan on _hyper_1_46_chunk + -> Seq Scan on _hyper_1_47_chunk + -> Seq Scan on _hyper_1_48_chunk + -> Seq Scan on _hyper_1_49_chunk + -> Seq Scan on _hyper_1_50_chunk + -> Seq Scan on _hyper_1_51_chunk + -> Seq Scan on _hyper_1_52_chunk + -> Seq Scan on _hyper_1_53_chunk + -> Seq Scan on _hyper_1_54_chunk + -> Seq Scan on _hyper_1_55_chunk + -> Seq Scan on _hyper_1_56_chunk + -> Seq Scan on _hyper_1_57_chunk + -> Seq Scan on _hyper_1_58_chunk + -> Seq Scan on _hyper_1_59_chunk + -> Seq Scan on _hyper_1_60_chunk + -> Seq Scan on _hyper_1_61_chunk + -> Seq Scan on _hyper_1_62_chunk + -> Seq Scan on _hyper_1_63_chunk + -> Seq Scan on _hyper_1_64_chunk + -> Seq Scan on _hyper_1_65_chunk + -> Seq Scan on 
_hyper_1_66_chunk + -> Seq Scan on _hyper_1_67_chunk + -> Seq Scan on _hyper_1_68_chunk + -> Seq Scan on _hyper_1_69_chunk + -> Seq Scan on _hyper_1_70_chunk + -> Seq Scan on _hyper_1_71_chunk + -> Seq Scan on _hyper_1_72_chunk + -> Seq Scan on _hyper_1_73_chunk + -> Seq Scan on _hyper_1_74_chunk + -> Seq Scan on _hyper_1_75_chunk + -> Seq Scan on _hyper_1_76_chunk + -> Seq Scan on _hyper_1_77_chunk + -> Seq Scan on _hyper_1_78_chunk + -> Seq Scan on _hyper_1_79_chunk + -> Seq Scan on _hyper_1_80_chunk + -> Seq Scan on _hyper_1_81_chunk + -> Seq Scan on _hyper_1_82_chunk + -> Seq Scan on _hyper_1_83_chunk + -> Seq Scan on _hyper_1_84_chunk + -> Seq Scan on _hyper_1_85_chunk + -> Seq Scan on _hyper_1_86_chunk + -> Seq Scan on _hyper_1_87_chunk + -> Seq Scan on _hyper_1_88_chunk + -> Seq Scan on _hyper_1_89_chunk + -> Seq Scan on _hyper_1_90_chunk + -> Seq Scan on _hyper_1_91_chunk + -> Seq Scan on _hyper_1_92_chunk + -> Seq Scan on _hyper_1_93_chunk + -> Seq Scan on _hyper_1_94_chunk + -> Seq Scan on _hyper_1_95_chunk + -> Seq Scan on _hyper_1_96_chunk + -> Seq Scan on _hyper_1_97_chunk + -> Seq Scan on _hyper_1_98_chunk + -> Seq Scan on _hyper_1_99_chunk + -> Seq Scan on _hyper_1_100_chunk + -> Seq Scan on _hyper_1_101_chunk + -> Seq Scan on _hyper_1_102_chunk +(105 rows) + +-- explicit chunk exclusion +:PREFIX SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[1,2]) ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + -> Seq Scan on _hyper_1_2_chunk +(5 rows) + +:PREFIX SELECT * FROM (SELECT * FROM hyper h WHERE _timescaledb_functions.chunks_in(h, ARRAY[1,2,3])) T ORDER BY value; + QUERY PLAN +---------------------------------------------- + Sort + Sort Key: h_1.value + -> Append + -> Seq Scan on _hyper_1_1_chunk h_1 + -> Seq Scan on _hyper_1_2_chunk h_2 + -> Seq Scan on _hyper_1_3_chunk h_3 +(6 rows) + +:PREFIX SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[1,2,3]) AND time < 10 ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_1_2_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_1_3_chunk + Filter: ("time" < 10) +(9 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE device_id = 'dev1' AND time < to_timestamp(10) AND _timescaledb_functions.chunks_in(hyper_ts, ARRAY[116]) ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(4 rows) + +:PREFIX SELECT * FROM hyper_ts h JOIN tag on (h.tag_id = tag.id ) WHERE _timescaledb_functions.chunks_in(h, ARRAY[116]) AND time < to_timestamp(10) AND device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: h.value + -> Merge Join + Merge Cond: (tag.id = h.tag_id) + -> Index Scan using tag_pkey on tag + -> Sort + Sort Key: h.tag_id + -> Seq Scan on _hyper_3_116_chunk h + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(9 rows) + +:PREFIX SELECT * FROM 
hyper_w_space h1 JOIN hyper_ts h2 ON h1.device_id=h2.device_id WHERE _timescaledb_functions.chunks_in(h1, ARRAY[104,105]) AND _timescaledb_functions.chunks_in(h2, ARRAY[116,117]) ORDER BY h1.value; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: h1_1.value + -> Hash Join + Hash Cond: (h2_1.device_id = h1_1.device_id) + -> Append + -> Seq Scan on _hyper_3_116_chunk h2_1 + -> Seq Scan on _hyper_3_117_chunk h2_2 + -> Hash + -> Append + -> Seq Scan on _hyper_2_104_chunk h1_1 + -> Seq Scan on _hyper_2_105_chunk h1_2 +(11 rows) + +:PREFIX SELECT * FROM hyper_w_space h1 JOIN hyper_ts h2 ON h1.device_id=h2.device_id AND _timescaledb_functions.chunks_in(h2, ARRAY[116,117]) WHERE _timescaledb_functions.chunks_in(h1, ARRAY[104,105]) ORDER BY h1.value; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: h1_1.value + -> Hash Join + Hash Cond: (h2_1.device_id = h1_1.device_id) + -> Append + -> Seq Scan on _hyper_3_116_chunk h2_1 + -> Seq Scan on _hyper_3_117_chunk h2_2 + -> Hash + -> Append + -> Seq Scan on _hyper_2_104_chunk h1_1 + -> Seq Scan on _hyper_2_105_chunk h1_2 +(11 rows) + +:PREFIX SELECT * FROM hyper h1, hyper h2 WHERE _timescaledb_functions.chunks_in(h1, ARRAY[1,2]) AND _timescaledb_functions.chunks_in(h2, ARRAY[2,3]); + QUERY PLAN +----------------------------------------------------- + Nested Loop + -> Append + -> Seq Scan on _hyper_1_1_chunk h1_1 + -> Seq Scan on _hyper_1_2_chunk h1_2 + -> Materialize + -> Append + -> Seq Scan on _hyper_1_2_chunk h2_1 + -> Seq Scan on _hyper_1_3_chunk h2_2 +(8 rows) + +SET enable_seqscan=false; +-- Should perform index-only scan. Since we pass whole row into the function it might block planner from using index-only scan. +-- But since we'll remove the function from the query tree before planner decision it shouldn't affect index-only decision. 
+:PREFIX SELECT time FROM hyper WHERE time=0 AND _timescaledb_functions.chunks_in(hyper, ARRAY[1]); + QUERY PLAN +--------------------------------------------------------------------------- + Index Only Scan using _hyper_1_1_chunk_hyper_time_idx on _hyper_1_1_chunk + Index Cond: ("time" = 0) +(2 rows) + +:PREFIX SELECT first(value, time) FROM hyper h WHERE _timescaledb_functions.chunks_in(h, ARRAY[1]); + QUERY PLAN +----------------------------------------------------------------------------------------------- + Result + InitPlan 1 (returns $0) + -> Limit + -> Index Scan Backward using _hyper_1_1_chunk_hyper_time_idx on _hyper_1_1_chunk h + Index Cond: ("time" IS NOT NULL) +(5 rows) + +\set ON_ERROR_STOP 0 +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[1,2]) AND _timescaledb_functions.chunks_in(hyper, ARRAY[2,3]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:26: ERROR: illegal invocation of chunks_in function +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(2, ARRAY[1]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:27: ERROR: function _timescaledb_functions.chunks_in(integer, integer[]) does not exist at character 27 +SELECT * FROM hyper WHERE time < 10 OR _timescaledb_functions.chunks_in(hyper, ARRAY[1,2]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:28: ERROR: illegal invocation of chunks_in function +SELECT _timescaledb_functions.chunks_in(hyper, ARRAY[1,2]) FROM hyper; +psql:include/plan_expand_hypertable_chunks_in_query.sql:29: ERROR: illegal invocation of chunks_in function +-- non existing chunk id +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[123456789]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:31: ERROR: chunk id 123456789 not found +-- chunk that belongs to another hypertable +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[104]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:33: ERROR: chunk id 104 does not belong to hypertable "hyper" +-- passing wrong row ref +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(ROW(1,2), ARRAY[104]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:35: ERROR: first parameter for chunks_in function needs to be record +-- passing func as chunk id +SELECT * FROM hyper h WHERE _timescaledb_functions.chunks_in(h, array_append(ARRAY[1],current_setting('server_version_num')::int)); +psql:include/plan_expand_hypertable_chunks_in_query.sql:37: ERROR: second argument to chunk_in should contain only integer consts +-- NULL chunk IDs not allowed in chunk array +SELECT * FROM hyper h WHERE _timescaledb_functions.chunks_in(h, ARRAY[NULL::int]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:39: ERROR: chunk id can't be NULL +\set ON_ERROR_STOP 1 +-- chunks_in is STRICT function and for NULL arguments a null result is returned +SELECT * FROM hyper h WHERE _timescaledb_functions.chunks_in(h, NULL); + value | time +-------+------ +(0 rows) + +\set ECHO errors +RESET timescaledb.enable_optimizations; +CREATE TABLE t(time timestamptz NOT NULL); +SELECT table_name FROM create_hypertable('t','time'); + table_name +------------ + t +(1 row) + +INSERT INTO t VALUES ('2000-01-01'), ('2010-01-01'), ('2020-01-01'); +EXPLAIN (costs off) SELECT * FROM t t1 INNER JOIN t t2 ON t1.time = t2.time WHERE t1.time < timestamptz '2010-01-01'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (t1_1."time" = 
t2_1."time") + -> Merge Append + Sort Key: t1_1."time" + -> Index Only Scan Backward using _hyper_15_182_chunk_t_time_idx on _hyper_15_182_chunk t1_1 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_15_183_chunk_t_time_idx on _hyper_15_183_chunk t1_2 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Materialize + -> Merge Append + Sort Key: t2_1."time" + -> Index Only Scan Backward using _hyper_15_182_chunk_t_time_idx on _hyper_15_182_chunk t2_1 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_15_183_chunk_t_time_idx on _hyper_15_183_chunk t2_2 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) +(15 rows) + +SET timescaledb.enable_qual_propagation TO false; +EXPLAIN (costs off) SELECT * FROM t t1 INNER JOIN t t2 ON t1.time = t2.time WHERE t1.time < timestamptz '2010-01-01'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (t1_1."time" = t2_1."time") + -> Merge Append + Sort Key: t1_1."time" + -> Index Only Scan Backward using _hyper_15_182_chunk_t_time_idx on _hyper_15_182_chunk t1_1 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_15_183_chunk_t_time_idx on _hyper_15_183_chunk t1_2 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Materialize + -> Merge Append + Sort Key: t2_1."time" + -> Index Only Scan Backward using _hyper_15_182_chunk_t_time_idx on _hyper_15_182_chunk t2_1 + -> Index Only Scan Backward using _hyper_15_183_chunk_t_time_idx on _hyper_15_183_chunk t2_2 + -> Index Only Scan Backward using _hyper_15_184_chunk_t_time_idx on _hyper_15_184_chunk t2_3 +(14 rows) + +RESET timescaledb.enable_qual_propagation; +CREATE TABLE test (a int, time timestamptz NOT NULL); +SELECT table_name FROM create_hypertable('public.test', 'time'); + table_name +------------ + test +(1 row) + +INSERT INTO test SELECT i, '2020-04-01'::date-10-i from generate_series(1,20) i; +CREATE OR REPLACE FUNCTION test_f(_ts timestamptz) +RETURNS SETOF test LANGUAGE SQL STABLE PARALLEL SAFE +AS $f$ + SELECT DISTINCT ON (a) * FROM test WHERE time >= _ts ORDER BY a, time DESC +$f$; +EXPLAIN (costs off) SELECT * FROM test_f(now()); + QUERY PLAN +------------------------------------------------- + Unique + -> Sort + Sort Key: test.a, test."time" DESC + -> Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 4 +(5 rows) + +EXPLAIN (costs off) SELECT * FROM test_f(now()); + QUERY PLAN +------------------------------------------------- + Unique + -> Sort + Sort Key: test.a, test."time" DESC + -> Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 4 +(5 rows) + +CREATE TABLE t1 (a int, b int NOT NULL); +SELECT create_hypertable('t1', 'b', chunk_time_interval=>10); + create_hypertable +------------------- + (17,public,t1,t) +(1 row) + +CREATE TABLE t2 (a int, b int NOT NULL); +SELECT create_hypertable('t2', 'b', chunk_time_interval=>10); + create_hypertable +------------------- + (18,public,t2,t) +(1 row) + +CREATE OR REPLACE FUNCTION f_t1(_a int, _b int) + RETURNS SETOF t1 + LANGUAGE SQL + STABLE PARALLEL SAFE +AS $function$ + SELECT DISTINCT ON (a) * FROM t1 WHERE a = _a and b = _b ORDER BY a, b DESC +$function$ +; +CREATE OR REPLACE FUNCTION f_t2(_a int, _b int) 
RETURNS SETOF t2 LANGUAGE sql STABLE PARALLEL SAFE +AS $function$ + SELECT DISTINCT ON (j.a) j.* + FROM + f_t1(_a, _b) sc, + t2 j + WHERE + j.b = _b AND + j.a = _a + ORDER BY j.a, j.b DESC +$function$ +; +CREATE OR REPLACE FUNCTION f_t1_2(_b int) RETURNS SETOF t1 LANGUAGE SQL STABLE PARALLEL SAFE +AS $function$ + SELECT DISTINCT ON (j.a) jt.* FROM t1 j, f_t1(j.a, _b) jt +$function$; +EXPLAIN (costs off) SELECT * FROM f_t1_2(10); + QUERY PLAN +--------------------------------------------------------------- + Subquery Scan on f_t1_2 + -> Unique + -> Sort + Sort Key: j.a + -> Nested Loop + -> Seq Scan on t1 j + -> Unique + -> Index Scan using t1_b_idx on t1 + Index Cond: (b = 10) + Filter: (a = j.a) +(10 rows) + +EXPLAIN (costs off) SELECT * FROM f_t1_2(10) sc, f_t2(sc.a, 10); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Unique + -> Sort + Sort Key: j.a + -> Nested Loop + -> Seq Scan on t1 j + -> Unique + -> Index Scan using t1_b_idx on t1 + Index Cond: (b = 10) + Filter: (a = j.a) + -> Unique + -> Nested Loop + -> Unique + -> Index Scan using t1_b_idx on t1 t1_1 + Index Cond: (b = 10) + Filter: (a = t1.a) + -> Index Scan using t2_b_idx on t2 j_1 + Index Cond: (b = 10) + Filter: (a = t1.a) +(19 rows) + +--TEST END-- diff --git a/test/expected/plan_expand_hypertable-15.out b/test/expected/plan_expand_hypertable-15.out new file mode 100644 index 00000000000..cd939c7bd71 --- /dev/null +++ b/test/expected/plan_expand_hypertable-15.out @@ -0,0 +1,3026 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\set PREFIX 'EXPLAIN (costs off) ' +\ir include/plan_expand_hypertable_load.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+--single time dimension +CREATE TABLE hyper ("time_broken" bigint NOT NULL, "value" integer); +ALTER TABLE hyper +DROP COLUMN time_broken, +ADD COLUMN time BIGINT; +SELECT create_hypertable('hyper', 'time', chunk_time_interval => 10); +psql:include/plan_expand_hypertable_load.sql:12: NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------- + (1,public,hyper,t) +(1 row) + +INSERT INTO hyper SELECT g, g FROM generate_series(0,1000) g; +--insert a point with INT_MAX_64 +INSERT INTO hyper (time, value) SELECT 9223372036854775807::bigint, 0; +--time and space +CREATE TABLE hyper_w_space ("time_broken" bigint NOT NULL, "device_id" text, "value" integer); +ALTER TABLE hyper_w_space +DROP COLUMN time_broken, +ADD COLUMN time BIGINT; +SELECT create_hypertable('hyper_w_space', 'time', 'device_id', 4, chunk_time_interval => 10); +psql:include/plan_expand_hypertable_load.sql:26: NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (2,public,hyper_w_space,t) +(1 row) + +INSERT INTO hyper_w_space (time, device_id, value) SELECT g, 'dev' || g, g FROM generate_series(0,30) g; +CREATE VIEW hyper_w_space_view AS (SELECT * FROM hyper_w_space); +--with timestamp and space +CREATE TABLE tag (id serial PRIMARY KEY, name text); +CREATE TABLE hyper_ts ("time_broken" timestamptz NOT NULL, "device_id" text, tag_id INT REFERENCES tag(id), "value" integer); +ALTER TABLE hyper_ts +DROP COLUMN time_broken, +ADD COLUMN time TIMESTAMPTZ; +SELECT create_hypertable('hyper_ts', 'time', 'device_id', 2, chunk_time_interval => '10 seconds'::interval); +psql:include/plan_expand_hypertable_load.sql:41: NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------- + (3,public,hyper_ts,t) +(1 row) + +INSERT INTO tag(name) SELECT 'tag'||g FROM generate_series(0,10) g; +INSERT INTO hyper_ts (time, device_id, tag_id, value) SELECT to_timestamp(g), 'dev' || g, (random() /10)+1, g FROM generate_series(0,30) g; +--one in the future +INSERT INTO hyper_ts (time, device_id, tag_id, value) VALUES ('2100-01-01 02:03:04 PST', 'dev101', 1, 0); +--time partitioning function +CREATE OR REPLACE FUNCTION unix_to_timestamp(unixtime float8) + RETURNS TIMESTAMPTZ LANGUAGE SQL IMMUTABLE PARALLEL SAFE STRICT AS +$BODY$ + SELECT to_timestamp(unixtime); +$BODY$; +CREATE TABLE hyper_timefunc ("time" float8 NOT NULL, "device_id" text, "value" integer); +SELECT create_hypertable('hyper_timefunc', 'time', 'device_id', 4, chunk_time_interval => 10, time_partitioning_func => 'unix_to_timestamp'); +psql:include/plan_expand_hypertable_load.sql:57: WARNING: unexpected interval: smaller than one second + create_hypertable +----------------------------- + (4,public,hyper_timefunc,t) +(1 row) + +INSERT INTO hyper_timefunc (time, device_id, value) SELECT g, 'dev' || g, g FROM generate_series(0,30) g; +CREATE TABLE metrics_timestamp(time timestamp); +SELECT create_hypertable('metrics_timestamp','time'); +psql:include/plan_expand_hypertable_load.sql:62: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +psql:include/plan_expand_hypertable_load.sql:62: NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------------------- + (5,public,metrics_timestamp,t) +(1 row) + +INSERT INTO metrics_timestamp SELECT generate_series('2000-01-01'::timestamp,'2000-02-01'::timestamp,'1d'::interval); +CREATE TABLE metrics_timestamptz(time timestamptz, device_id int); +SELECT 
create_hypertable('metrics_timestamptz','time'); +psql:include/plan_expand_hypertable_load.sql:66: NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------- + (6,public,metrics_timestamptz,t) +(1 row) + +INSERT INTO metrics_timestamptz SELECT generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval), 1; +INSERT INTO metrics_timestamptz SELECT generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval), 2; +INSERT INTO metrics_timestamptz SELECT generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval), 3; +--create a second table to test joins with +CREATE TABLE metrics_timestamptz_2 (LIKE metrics_timestamptz); +SELECT create_hypertable('metrics_timestamptz_2','time'); + create_hypertable +------------------------------------ + (7,public,metrics_timestamptz_2,t) +(1 row) + +INSERT INTO metrics_timestamptz_2 +SELECT * FROM metrics_timestamptz; +INSERT INTO metrics_timestamptz_2 VALUES ('2000-12-01'::timestamptz, 3); +CREATE TABLE metrics_date(time date); +SELECT create_hypertable('metrics_date','time'); +psql:include/plan_expand_hypertable_load.sql:79: NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------- + (8,public,metrics_date,t) +(1 row) + +INSERT INTO metrics_date SELECT generate_series('2000-01-01'::date,'2000-02-01'::date,'1d'::interval); +ANALYZE hyper; +ANALYZE hyper_w_space; +ANALYZE tag; +ANALYZE hyper_ts; +ANALYZE hyper_timefunc; +-- create normal table for JOIN tests +CREATE TABLE regular_timestamptz(time timestamptz); +INSERT INTO regular_timestamptz SELECT generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval); +\ir include/plan_expand_hypertable_query.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+--we want to see how our logic excludes chunks +--and not how much work constraint_exclusion does +SET constraint_exclusion = 'off'; +\qecho test upper bounds +test upper bounds +:PREFIX SELECT * FROM hyper WHERE time < 10 ORDER BY value; + QUERY PLAN +------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time < 11 ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 11) + -> Seq Scan on _hyper_1_2_chunk + Filter: ("time" < 11) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time = 10 ORDER BY value; + QUERY PLAN +------------------------------------ + Sort + Sort Key: _hyper_1_2_chunk.value + -> Seq Scan on _hyper_1_2_chunk + Filter: ("time" = 10) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE 10 >= time ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (10 >= "time") + -> Seq Scan on _hyper_1_2_chunk + Filter: (10 >= "time") +(7 rows) + +\qecho test lower bounds +test lower bounds +:PREFIX SELECT * FROM hyper WHERE time >= 10 and time < 20 ORDER BY value; + QUERY PLAN +---------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk.value + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" >= 10) AND ("time" < 20)) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE 10 < time and 20 >= time ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk.value + -> Append + -> Seq Scan on _hyper_1_2_chunk + Filter: ((10 < "time") AND (20 >= "time")) + -> Seq Scan on _hyper_1_3_chunk + Filter: ((10 < "time") AND (20 >= "time")) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time >= 9 and time < 20 ORDER BY value; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 9) AND ("time" < 20)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" >= 9) AND ("time" < 20)) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time > 9 and time < 20 ORDER BY value; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk.value + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 9) AND ("time" < 20)) +(4 rows) + +\qecho test empty result +test empty result +:PREFIX SELECT * FROM hyper WHERE time < 0; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +\qecho test expression evaluation +test expression evaluation +:PREFIX SELECT * FROM hyper WHERE time < (5*2)::smallint; + QUERY PLAN +------------------------------------- + Seq Scan on _hyper_1_1_chunk + Filter: ("time" < '10'::smallint) +(2 rows) + +\qecho test logic at INT64_MAX +test logic at INT64_MAX +:PREFIX SELECT * FROM hyper WHERE time = 9223372036854775807::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_1_102_chunk.value + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" = '9223372036854775807'::bigint) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time = 9223372036854775806::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_1_102_chunk.value 
+ -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" = '9223372036854775806'::bigint) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time >= 9223372036854775807::bigint ORDER BY value; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: _hyper_1_102_chunk.value + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" >= '9223372036854775807'::bigint) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time > 9223372036854775807::bigint ORDER BY value; + QUERY PLAN +-------------------------------- + Sort + Sort Key: value + -> Result + One-Time Filter: false +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time > 9223372036854775806::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_1_102_chunk.value + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" > '9223372036854775806'::bigint) +(4 rows) + +\qecho cte +cte +:PREFIX WITH cte AS( + SELECT * FROM hyper WHERE time < 10 +) +SELECT * FROM cte ORDER BY value; + QUERY PLAN +------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) +(4 rows) + +\qecho subquery +subquery +:PREFIX SELECT 0 = ANY (SELECT value FROM hyper WHERE time < 10); + QUERY PLAN +-------------------------------------- + Result + SubPlan 1 + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) +(4 rows) + +\qecho no space constraint +no space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 ORDER BY value; + QUERY PLAN +-------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < 10) +(11 rows) + +\qecho valid space constraint +valid space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and device_id = 'dev5' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and 'dev5' = device_id ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND ('dev5'::text = device_id)) +(4 rows) + +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and 'dev'||(2+3) = device_id ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND ('dev5'::text = device_id)) +(4 rows) + +\qecho only space constraint +only space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE 'dev5' = device_id ORDER BY value; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Append + -> Seq Scan on _hyper_2_106_chunk + Filter: ('dev5'::text = device_id) + -> Seq Scan on _hyper_2_109_chunk + Filter: ('dev5'::text = device_id) + -> Seq Scan on _hyper_2_111_chunk + Filter: ('dev5'::text = device_id) +(9 rows) + +\qecho unhandled space constraint +unhandled space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and device_id > 
'dev5' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) +(11 rows) + +\qecho use of OR - does not filter chunks +use of OR - does not filter chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND (device_id = 'dev5' or device_id = 'dev6') ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) +(11 rows) + +\qecho cte +cte +:PREFIX WITH cte AS( + SELECT * FROM hyper_w_space WHERE time < 10 and device_id = 'dev5' +) +SELECT * FROM cte ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho subquery +subquery +:PREFIX SELECT 0 = ANY (SELECT value FROM hyper_w_space WHERE time < 10 and device_id = 'dev5'); + QUERY PLAN +------------------------------------------------------------------ + Result + SubPlan 1 + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho view +view +:PREFIX SELECT * FROM hyper_w_space_view WHERE time < 10 and device_id = 'dev5' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho IN statement - simple +IN statement - simple +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id IN ('dev5') ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho IN statement - two chunks +IN statement - two chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id IN ('dev5','dev6') ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_105_chunk.value + -> Append + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[]))) +(7 rows) + +\qecho IN statement - one chunk +IN statement - one chunk +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id IN ('dev4','dev5') ORDER 
BY value; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev4,dev5}'::text[]))) +(4 rows) + +\qecho NOT IN - does not filter chunks +NOT IN - does not filter chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id NOT IN ('dev5','dev6') ORDER BY value; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND (device_id <> ALL ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND (device_id <> ALL ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id <> ALL ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id <> ALL ('{dev5,dev6}'::text[]))) +(11 rows) + +\qecho IN statement with subquery - does not filter chunks +IN statement with subquery - does not filter chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id IN (SELECT 'dev5'::text) ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho ANY +ANY +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id = ANY(ARRAY['dev5','dev6']) ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_105_chunk.value + -> Append + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[]))) +(7 rows) + +\qecho ANY with intersection +ANY with intersection +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id = ANY(ARRAY['dev5','dev6']) AND device_id = ANY(ARRAY['dev6','dev7']) ORDER BY value; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_105_chunk.value + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[])) AND (device_id = ANY ('{dev6,dev7}'::text[]))) +(4 rows) + +\qecho ANY without intersection shouldnt scan any chunks +ANY without intersection shouldnt scan any chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id = ANY(ARRAY['dev5','dev6']) AND device_id = ANY(ARRAY['dev8','dev9']) ORDER BY value; + QUERY PLAN +-------------------------------- + Sort + Sort Key: value + -> Result + One-Time Filter: false +(4 rows) + +\qecho ANY/IN/ALL only works for equals operator +ANY/IN/ALL only works for equals operator +:PREFIX SELECT * FROM hyper_w_space WHERE device_id < ANY(ARRAY['dev5','dev6']) ORDER BY value; + QUERY PLAN +----------------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq 
Scan on _hyper_2_106_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_107_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_108_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_109_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_110_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_111_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_112_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_113_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_114_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_115_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) +(29 rows) + +\qecho ALL with equals and different values shouldnt scan any chunks +ALL with equals and different values shouldnt scan any chunks +:PREFIX SELECT * FROM hyper_w_space WHERE device_id = ALL(ARRAY['dev5','dev6']) ORDER BY value; + QUERY PLAN +-------------------------------- + Sort + Sort Key: value + -> Result + One-Time Filter: false +(4 rows) + +\qecho Multi AND +Multi AND +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND time < 100 ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND ("time" < 100)) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND ("time" < 100)) + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND ("time" < 100)) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND ("time" < 100)) +(11 rows) + +\qecho Time dimension doesnt filter chunks when using IN/ANY with multiple arguments +Time dimension doesnt filter chunks when using IN/ANY with multiple arguments +:PREFIX SELECT * FROM hyper_w_space WHERE time < ANY(ARRAY[1,2]) ORDER BY value; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_107_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_108_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_109_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_110_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_111_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_112_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_113_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_114_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_115_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) +(29 rows) + +\qecho Time dimension chunk filtering works for ANY with single argument +Time dimension chunk filtering works for ANY with single argument +:PREFIX SELECT * FROM hyper_w_space WHERE time < ANY(ARRAY[1]) ORDER BY value; + QUERY PLAN 
+--------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < ANY ('{1}'::integer[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < ANY ('{1}'::integer[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < ANY ('{1}'::integer[])) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < ANY ('{1}'::integer[])) +(11 rows) + +\qecho Time dimension chunk filtering works for ALL with single argument +Time dimension chunk filtering works for ALL with single argument +:PREFIX SELECT * FROM hyper_w_space WHERE time < ALL(ARRAY[1]) ORDER BY value; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < ALL ('{1}'::integer[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < ALL ('{1}'::integer[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < ALL ('{1}'::integer[])) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < ALL ('{1}'::integer[])) +(11 rows) + +\qecho Time dimension chunk filtering works for ALL with multiple arguments +Time dimension chunk filtering works for ALL with multiple arguments +:PREFIX SELECT * FROM hyper_w_space WHERE time < ALL(ARRAY[1,10,20,30]) ORDER BY value; + QUERY PLAN +------------------------------------------------------------------ + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < ALL ('{1,10,20,30}'::integer[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < ALL ('{1,10,20,30}'::integer[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < ALL ('{1,10,20,30}'::integer[])) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < ALL ('{1,10,20,30}'::integer[])) +(11 rows) + +\qecho AND intersection using IN and EQUALS +AND intersection using IN and EQUALS +:PREFIX SELECT * FROM hyper_w_space WHERE device_id IN ('dev1','dev2') AND device_id = 'dev1' ORDER BY value; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ((device_id = ANY ('{dev1,dev2}'::text[])) AND (device_id = 'dev1'::text)) + -> Seq Scan on _hyper_2_110_chunk + Filter: ((device_id = ANY ('{dev1,dev2}'::text[])) AND (device_id = 'dev1'::text)) + -> Seq Scan on _hyper_2_114_chunk + Filter: ((device_id = ANY ('{dev1,dev2}'::text[])) AND (device_id = 'dev1'::text)) +(9 rows) + +\qecho AND with no intersection using IN and EQUALS +AND with no intersection using IN and EQUALS +:PREFIX SELECT * FROM hyper_w_space WHERE device_id IN ('dev1','dev2') AND device_id = 'dev3' ORDER BY value; + QUERY PLAN +-------------------------------- + Sort + Sort Key: value + -> Result + One-Time Filter: false +(4 rows) + +\qecho timestamps +timestamps +\qecho these should work since they are immutable functions +these should work since they are immutable functions +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969 PST'::timestamptz ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Append + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 
PST'::timestamp with time zone) +(7 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < to_timestamp(10) ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Append + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) +(7 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969'::timestamp AT TIME ZONE 'PST' ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Append + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) +(7 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(4 rows) + +\qecho these should not work since uses stable functions; +these should not work since uses stable functions; +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969'::timestamp ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------ + Sort + Sort Key: hyper_ts.value + -> Custom Scan (ChunkAppend) on hyper_ts + Chunks excluded during startup: 6 + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) +(8 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < ('Wed Dec 31 16:00:10 1969'::timestamp::timestamptz) ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Custom Scan (ChunkAppend) on hyper_ts + Chunks excluded during startup: 6 + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) +(8 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE NOW() < time ORDER BY value; + QUERY PLAN +--------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Custom Scan (ChunkAppend) on hyper_ts + Chunks excluded during startup: 7 + -> Seq Scan on _hyper_3_123_chunk + Filter: (now() < "time") +(6 rows) + +\qecho joins +joins +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.id=1) and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Nested Loop Semi Join + -> Seq Scan on _hyper_3_116_chunk + 
Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text) AND (tag_id = 1)) + -> Seq Scan on tag + Filter: (id = 1) +(7 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.id=1) or (time < to_timestamp(10) and device_id = 'dev1') ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Custom Scan (ChunkAppend) on hyper_ts + -> Seq Scan on _hyper_3_116_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + SubPlan 1 + -> Seq Scan on tag + Filter: (id = 1) + -> Seq Scan on _hyper_3_117_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_118_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_119_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_120_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_121_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_122_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_123_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) +(22 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.name='tag1') and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Nested Loop + Join Filter: (_hyper_3_116_chunk.tag_id = tag.id) + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) + -> Seq Scan on tag + Filter: (name = 'tag1'::text) +(8 rows) + +:PREFIX SELECT * FROM hyper_ts JOIN tag on (hyper_ts.tag_id = tag.id ) WHERE time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Merge Join + Merge Cond: (tag.id = _hyper_3_116_chunk.tag_id) + -> Index Scan using tag_pkey on tag + -> Sort + Sort Key: _hyper_3_116_chunk.tag_id + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(9 rows) + +:PREFIX SELECT * FROM hyper_ts JOIN tag on (hyper_ts.tag_id = tag.id ) WHERE tag.name = 'tag1' and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Nested Loop + Join Filter: (_hyper_3_116_chunk.tag_id = tag.id) + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) + -> Seq Scan on tag + Filter: (name = 'tag1'::text) +(8 rows) + +\qecho test constraint exclusion for constraints in ON clause of JOINs +test constraint exclusion for constraints in ON clause of JOINs +\qecho should exclude chunks on m1 and propagate qual to m2 because of INNER JOIN +should exclude chunks on m1 and propagate qual to m2 because of INNER JOIN +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +\qecho should exclude chunks on m2 and propagate qual to m1 because of INNER JOIN +should exclude chunks on m2 and propagate qual to m1 because of INNER JOIN +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +\qecho must not 
exclude on m1 +must not exclude on m1 +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + Join Filter: (m1."time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(19 rows) + +\qecho should exclude chunks on m2 +should exclude chunks on m2 +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + 
Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(24 rows) + +\qecho should exclude chunks on m1 +should exclude chunks on m1 +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: m1."time" + -> Merge Right Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(25 rows) + +\qecho must not exclude chunks on m2 +must not exclude chunks on m2 +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: m1."time" + -> Merge Left Join + Merge Cond: (m2."time" = m1."time") + Join Filter: (m2."time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (ChunkAppend) on 
metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 +(21 rows) + +\qecho time_bucket exclusion +time_bucket exclusion +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) < 10::bigint ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < '10'::bigint) AND (time_bucket('10'::bigint, "time") < '10'::bigint)) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) < 11::bigint ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < '21'::bigint) AND (time_bucket('10'::bigint, "time") < '11'::bigint)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" < '21'::bigint) AND (time_bucket('10'::bigint, "time") < '11'::bigint)) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" < '21'::bigint) AND (time_bucket('10'::bigint, "time") < '11'::bigint)) +(9 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) <= 10::bigint ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" <= '20'::bigint) AND (time_bucket('10'::bigint, "time") <= '10'::bigint)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" <= '20'::bigint) AND (time_bucket('10'::bigint, "time") <= '10'::bigint)) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" <= '20'::bigint) AND (time_bucket('10'::bigint, "time") <= '10'::bigint)) +(9 rows) + +:PREFIX SELECT * FROM hyper WHERE 10::bigint > time_bucket(10, time) ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < '10'::bigint) AND ('10'::bigint > time_bucket('10'::bigint, "time"))) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE 11::bigint > 
time_bucket(10, time) ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < '21'::bigint) AND ('11'::bigint > time_bucket('10'::bigint, "time"))) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" < '21'::bigint) AND ('11'::bigint > time_bucket('10'::bigint, "time"))) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" < '21'::bigint) AND ('11'::bigint > time_bucket('10'::bigint, "time"))) +(9 rows) + +\qecho test overflow behaviour of time_bucket exclusion +test overflow behaviour of time_bucket exclusion +:PREFIX SELECT * FROM hyper WHERE time > 950 AND time_bucket(10, time) < '9223372036854775807'::bigint ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_96_chunk."time" + -> Append + -> Seq Scan on _hyper_1_96_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_97_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_98_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_99_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_100_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_101_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_102_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) +(17 rows) + +\qecho test timestamp upper boundary +test timestamp upper boundary +\qecho there should be no transformation if we are out of the supported (TimescaleDB-specific) range +there should be no transformation if we are out of the supported (TimescaleDB-specific) range +:PREFIX SELECT * FROM metrics_timestamp WHERE time_bucket('1d',time) < '294276-01-01'::timestamp ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on metrics_timestamp + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_5_155_chunk_metrics_timestamp_time_idx on _hyper_5_155_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_156_chunk_metrics_timestamp_time_idx on _hyper_5_156_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_157_chunk_metrics_timestamp_time_idx on _hyper_5_157_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_158_chunk_metrics_timestamp_time_idx on _hyper_5_158_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_159_chunk_metrics_timestamp_time_idx on _hyper_5_159_chunk + Filter: 
(time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) +(12 rows) + +\qecho transformation would be out of range +transformation would be out of range +:PREFIX SELECT * FROM metrics_timestamp WHERE time_bucket('1000d',time) < '294276-01-01'::timestamp ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_5_155_chunk_metrics_timestamp_time_idx on _hyper_5_155_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_156_chunk_metrics_timestamp_time_idx on _hyper_5_156_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_157_chunk_metrics_timestamp_time_idx on _hyper_5_157_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_158_chunk_metrics_timestamp_time_idx on _hyper_5_158_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_159_chunk_metrics_timestamp_time_idx on _hyper_5_159_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) +(12 rows) + +\qecho test timestamptz upper boundary +test timestamptz upper boundary +\qecho there should be no transformation if we are out of the supported (TimescaleDB-specific) range +there should be no transformation if we are out of the supported (TimescaleDB-specific) range +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1d',time) < '294276-01-01'::timestamptz ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) +(12 rows) + +\qecho transformation would be out of range +transformation would be out of range +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1000d',time) < 
'294276-01-01'::timestamptz ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) +(12 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) > 10 AND time_bucket(10, time) < 100 ORDER BY time; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk."time" + -> Append + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_4_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_5_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_6_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_7_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_8_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_9_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_10_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) +(21 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) > 10 AND time_bucket(10, time) < 20 ORDER BY time; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk."time" + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 10) AND ("time" < '20'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 20)) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(1, time) > 11 AND time_bucket(1, time) < 19 ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk."time" + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 11) AND ("time" < '19'::bigint) AND (time_bucket('1'::bigint, "time") > 11) AND (time_bucket('1'::bigint, "time") < 19)) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE 10 < time_bucket(10, time) AND 20 > time_bucket(10,time) ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk."time" + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 10) AND ("time" < '20'::bigint) AND (10 < time_bucket('10'::bigint, "time")) AND (20 > time_bucket('10'::bigint, "time"))) +(4 rows) + +\qecho time_bucket exclusion with date +time_bucket exclusion with date +:PREFIX SELECT * FROM metrics_date WHERE time_bucket('1d',time) < '2000-01-03' ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Index Only Scan Backward using _hyper_8_171_chunk_metrics_date_time_idx on _hyper_8_171_chunk + Index Cond: ("time" < '01-03-2000'::date) + Filter: (time_bucket('@ 1 day'::interval, "time") < '01-03-2000'::date) +(3 rows) + +:PREFIX SELECT * FROM metrics_date WHERE time_bucket('1d',time) >= '2000-01-03' AND time_bucket('1d',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_date + Order: metrics_date."time" + -> Index Only Scan Backward using _hyper_8_171_chunk_metrics_date_time_idx on _hyper_8_171_chunk + Index Cond: (("time" >= '01-03-2000'::date) AND ("time" <= '01-11-2000'::date)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= '01-03-2000'::date) AND (time_bucket('@ 1 day'::interval, "time") <= '01-10-2000'::date)) + -> Index Only Scan Backward using _hyper_8_172_chunk_metrics_date_time_idx on _hyper_8_172_chunk + Index Cond: (("time" >= '01-03-2000'::date) AND ("time" <= '01-11-2000'::date)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= '01-03-2000'::date) AND (time_bucket('@ 1 day'::interval, "time") <= '01-10-2000'::date)) +(8 rows) + +\qecho time_bucket exclusion with timestamp +time_bucket exclusion with timestamp +:PREFIX SELECT * FROM metrics_timestamp WHERE time_bucket('1d',time) < '2000-01-03' ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Index Only Scan Backward using _hyper_5_155_chunk_metrics_timestamp_time_idx on _hyper_5_155_chunk + Index Cond: ("time" < 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) +(3 
rows) + +:PREFIX SELECT * FROM metrics_timestamp WHERE time_bucket('1d',time) >= '2000-01-03' AND time_bucket('1d',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_5_155_chunk_metrics_timestamp_time_idx on _hyper_5_155_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) AND ("time" <= 'Tue Jan 11 00:00:00 2000'::timestamp without time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) AND (time_bucket('@ 1 day'::interval, "time") <= 'Mon Jan 10 00:00:00 2000'::timestamp without time zone)) + -> Index Only Scan Backward using _hyper_5_156_chunk_metrics_timestamp_time_idx on _hyper_5_156_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) AND ("time" <= 'Tue Jan 11 00:00:00 2000'::timestamp without time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) AND (time_bucket('@ 1 day'::interval, "time") <= 'Mon Jan 10 00:00:00 2000'::timestamp without time zone)) +(8 rows) + +\qecho time_bucket exclusion with timestamptz +time_bucket exclusion with timestamptz +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('6h',time) < '2000-01-03' ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: ("time" < 'Mon Jan 03 06:00:00 2000 PST'::timestamp with time zone) + Filter: (time_bucket('@ 6 hours'::interval, "time") < 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) +(3 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('6h',time) >= '2000-01-03' AND time_bucket('6h',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 10 06:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 6 hours'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 6 hours'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 10 06:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 6 hours'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 6 hours'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(8 
rows) + +\qecho time_bucket exclusion with timestamptz and day interval +time_bucket exclusion with timestamptz and day interval +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1d',time) < '2000-01-03' ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: ("time" < 'Tue Jan 04 00:00:00 2000 PST'::timestamp with time zone) + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) +(3 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1d',time) >= '2000-01-03' AND time_bucket('1d',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Tue Jan 11 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 1 day'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Tue Jan 11 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 1 day'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(8 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1d',time) >= '2000-01-03' AND time_bucket('7d',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 17 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 7 days'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 17 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 7 days'::interval, "time") <= 'Mon Jan 10 00:00:00 
2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 17 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 7 days'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(11 rows) + +\qecho no transformation +no transformation +:PREFIX SELECT * FROM hyper WHERE time_bucket(10 + floor(random())::int, time) > 10 AND time_bucket(10 + floor(random())::int, time) < 100 AND time < 150 ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper."time" + -> Custom Scan (ChunkAppend) on hyper + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_4_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_5_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_6_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_7_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_8_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_9_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_10_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_11_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_12_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, 
"time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_13_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_14_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_15_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) +(34 rows) + +\qecho exclude chunks based on time column with partitioning function. This +exclude chunks based on time column with partitioning function. This +\qecho transparently applies the time partitioning function on the time +transparently applies the time partitioning function on the time +\qecho value to be able to exclude chunks (similar to a closed dimension). +value to be able to exclude chunks (similar to a closed dimension). +:PREFIX SELECT * FROM hyper_timefunc WHERE time < 4 ORDER BY value; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: _hyper_4_124_chunk.value + -> Append + -> Seq Scan on _hyper_4_124_chunk + Filter: ("time" < '4'::double precision) + -> Seq Scan on _hyper_4_125_chunk + Filter: ("time" < '4'::double precision) + -> Seq Scan on _hyper_4_126_chunk + Filter: ("time" < '4'::double precision) + -> Seq Scan on _hyper_4_127_chunk + Filter: ("time" < '4'::double precision) +(11 rows) + +\qecho excluding based on time expression is currently unoptimized +excluding based on time expression is currently unoptimized +:PREFIX SELECT * FROM hyper_timefunc WHERE unix_to_timestamp(time) < 'Wed Dec 31 16:00:04 1969 PST' ORDER BY value; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_4_124_chunk.value + -> Append + -> Seq Scan on _hyper_4_124_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_125_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_126_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_127_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_128_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_129_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_130_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_131_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_132_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_133_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_134_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on 
_hyper_4_135_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_136_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_137_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_138_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_139_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_140_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_141_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_142_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_143_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_144_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_145_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_146_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_147_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_148_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_149_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_150_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_151_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_152_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_153_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_154_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) +(65 rows) + +\qecho test qual propagation for joins +test qual propagation for joins +RESET constraint_exclusion; +\qecho nothing to propagate +nothing to propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1, metrics_timestamptz_2 m2 WHERE m1.time = m2.time ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index 
Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(18 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(18 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on 
_hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(18 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: m1."time" + -> Merge Right Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(20 rows) + +\qecho OR constraints should not propagate +OR constraints should not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' OR m1.time > '2001-01-01' ORDER BY m1.time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Filter: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) OR ("time" > 'Mon Jan 01 00:00:00 2001 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Filter: (("time" < 
'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) OR ("time" > 'Mon Jan 01 00:00:00 2001 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(17 rows) + +\qecho test single constraint +test single constraint +\qecho constraint should be on both scans +constraint should be on both scans +\qecho these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +:PREFIX SELECT m1.time FROM metrics_timestamptz m1, metrics_timestamptz_2 m2 WHERE m1.time = m2.time AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 
m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(17 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +\qecho test 2 constraints on single relation +test 2 constraints on single relation +\qecho these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL 
condition on the JOIN column +these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +:PREFIX SELECT m1.time FROM metrics_timestamptz m1, metrics_timestamptz_2 m2 WHERE m1.time = m2.time AND m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT 
m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Append + -> Index Only Scan using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + Index Cond: ("time" = m1."time") +(20 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +\qecho test 2 constraints with 
1 constraint on each relation +test 2 constraints with 1 constraint on each relation +\qecho these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +:PREFIX SELECT m1.time FROM metrics_timestamptz m1, metrics_timestamptz_2 m2 WHERE m1.time = m2.time AND m1.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using 
_hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 
2000 PST'::timestamp with time zone)) +(15 rows) + +\qecho test constraints in ON clause of INNER JOIN +test constraints in ON clause of INNER JOIN +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +\qecho test constraints in ON clause of LEFT JOIN +test constraints in ON clause of LEFT JOIN +\qecho must not propagate +must not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 
PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(16 rows) + +\qecho test constraints in ON clause of RIGHT JOIN +test constraints in ON clause of RIGHT JOIN +\qecho must not propagate +must not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: m1_1."time" + -> Parallel Hash Left Join + Hash Cond: (m2_1."time" = m1_1."time") + Join Filter: ((m2_1."time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND (m2_1."time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Parallel Append + -> Parallel Seq Scan on _hyper_7_165_chunk m2_1 + -> Parallel Seq Scan on _hyper_7_166_chunk m2_2 + -> Parallel Seq Scan on _hyper_7_167_chunk m2_3 + -> Parallel Seq Scan on _hyper_7_168_chunk m2_4 + -> Parallel Seq Scan on _hyper_7_169_chunk m2_5 + -> Parallel Seq Scan on _hyper_7_170_chunk m2_6 + -> Parallel Hash + -> Parallel Append + -> Parallel Seq Scan on _hyper_6_160_chunk m1_1 + -> Parallel Seq Scan on _hyper_6_161_chunk m1_2 + -> Parallel Seq Scan on _hyper_6_162_chunk m1_3 + -> Parallel Seq Scan on _hyper_6_163_chunk m1_4 + -> Parallel Seq Scan on _hyper_6_164_chunk m1_5 +(21 rows) + +\qecho test equality condition not in ON clause +test equality condition not in ON clause +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON true WHERE m2.time = m1.time AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +\qecho test constraints not joined on +test constraints not joined on +\qecho device_id constraint must not propagate +device_id constraint must not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON true WHERE m2.time = m1.time AND m2.time < '2000-01-10' AND m1.device_id = 1 ORDER BY m1.time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Nested Loop + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + 
-> Index Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Append + -> Index Only Scan using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" = m1."time") AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" = m1."time") AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(14 rows) + +\qecho test multiple join conditions +test multiple join conditions +\qecho device_id constraint should propagate +device_id constraint should propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON true WHERE m2.time = m1.time AND m1.device_id = m2.device_id AND m2.time < '2000-01-10' AND m1.device_id = 1 ORDER BY m1.time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Nested Loop + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Append + -> Index Scan using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" = m1."time") AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Filter: (device_id = 1) + -> Index Scan using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" = m1."time") AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Filter: (device_id = 1) +(16 rows) + +\qecho test join with 3 tables +test join with 3 tables +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time INNER JOIN metrics_timestamptz m3 ON m2.time=m3.time WHERE m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + -> Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with 
time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Append + -> Index Only Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m3_1 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m3_2 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m3_3 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m3_4 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m3_5 + Index Cond: ("time" = m1."time") +(27 rows) + +\qecho test non-Const constraints +test non-Const constraints +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10'::text::timestamptz ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + Chunks excluded during startup: 3 + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + Chunks excluded during startup: 4 + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) +(17 rows) + +\qecho test now() +test now() +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < now() ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + Chunks excluded during startup: 0 + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 
now()) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + Index Cond: ("time" < now()) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + Chunks excluded during startup: 0 + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + Index Cond: ("time" < now()) +(31 rows) + +\qecho test volatile function +test volatile function +\qecho should not propagate +should not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < clock_timestamp() ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + Chunks excluded during startup: 0 + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + Filter: ("time" < clock_timestamp()) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(24 rows) + 
+:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m2.time < clock_timestamp() ORDER BY m1.time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m2."time" = m1."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + Chunks excluded during startup: 0 + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + Filter: ("time" < clock_timestamp()) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 +(25 rows) + +\qecho test JOINs with normal table +test JOINs with normal table +\qecho will not propagate because constraints are only added to hypertables +will not propagate because constraints are only added to hypertables +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN regular_timestamptz m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Sort + Sort Key: m2."time" + -> Seq Scan on regular_timestamptz m2 +(11 rows) + +\qecho test JOINs with normal table +test JOINs with normal table +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN regular_timestamptz m2 ON m1.time = m2.time WHERE m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> 
Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Sort + Sort Key: m2."time" + -> Seq Scan on regular_timestamptz m2 + Filter: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(12 rows) + +\qecho test quals are not pushed into OUTER JOIN +test quals are not pushed into OUTER JOIN +CREATE TABLE outer_join_1 (id int, name text,time timestamptz NOT NULL DEFAULT '2000-01-01'); +CREATE TABLE outer_join_2 (id int, name text,time timestamptz NOT NULL DEFAULT '2000-01-01'); +SELECT (SELECT table_name FROM create_hypertable(tbl, 'time')) FROM (VALUES ('outer_join_1'),('outer_join_2')) v(tbl); + table_name +-------------- + outer_join_1 + outer_join_2 +(2 rows) + +INSERT INTO outer_join_1 VALUES(1,'a'), (2,'b'); +INSERT INTO outer_join_2 VALUES(1,'a'); +:PREFIX SELECT one.id, two.name FROM outer_join_1 one LEFT OUTER JOIN outer_join_2 two ON one.id=two.id WHERE one.id=2; + QUERY PLAN +------------------------------------------------- + Nested Loop Left Join + Join Filter: (one.id = two.id) + -> Seq Scan on _hyper_9_176_chunk one + Filter: (id = 2) + -> Materialize + -> Seq Scan on _hyper_10_177_chunk two + Filter: (id = 2) +(7 rows) + +:PREFIX SELECT one.id, two.name FROM outer_join_2 two RIGHT OUTER JOIN outer_join_1 one ON one.id=two.id WHERE one.id=2; + QUERY PLAN +------------------------------------------------- + Nested Loop Left Join + Join Filter: (one.id = two.id) + -> Seq Scan on _hyper_9_176_chunk one + Filter: (id = 2) + -> Materialize + -> Seq Scan on _hyper_10_177_chunk two + Filter: (id = 2) +(7 rows) + +DROP TABLE outer_join_1; +DROP TABLE outer_join_2; +-- test UNION between regular table and hypertable +SELECT time FROM regular_timestamptz UNION SELECT time FROM metrics_timestamptz ORDER BY 1; + time +------------------------------ + Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sun Jan 30 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST +(32 rows) + +-- test UNION ALL between regular table and hypertable +SELECT time FROM regular_timestamptz UNION ALL SELECT time FROM metrics_timestamptz ORDER BY 1; + time +------------------------------ + Sat Jan 01 00:00:00 2000 PST + Sat Jan 01 00:00:00 2000 PST + Sat Jan 01 00:00:00 2000 PST + Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST 
+ Sun Jan 02 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sun Jan 30 00:00:00 2000 PST + Sun Jan 30 00:00:00 2000 PST + Sun Jan 30 00:00:00 2000 PST + Sun Jan 30 
00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST +(128 rows) + +-- test nested join qual propagation +:PREFIX +SELECT * FROM ( +SELECT o1_m1.time FROM metrics_timestamptz o1_m1 INNER JOIN metrics_timestamptz_2 o1_m2 ON true WHERE o1_m2.time = o1_m1.time AND o1_m1.device_id = o1_m2.device_id AND o1_m2.time < '2000-01-10' AND o1_m1.device_id = 1 +) o1 FULL OUTER JOIN ( +SELECT o2_m1.time FROM metrics_timestamptz o2_m1 FULL OUTER JOIN metrics_timestamptz_2 o2_m2 ON true WHERE o2_m2.time = o2_m1.time AND o2_m1.device_id = o2_m2.device_id AND o2_m2.time > '2000-01-20' AND o2_m1.device_id = 2 +) o2 ON o1.time = o2.time ORDER BY 1,2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: o1_m1_1."time", o2_m1_1."time" + -> Merge Full Join + Merge Cond: (o2_m1_1."time" = o1_m1_1."time") + -> Nested Loop + -> Merge Append + Sort Key: o2_m2_1."time" + -> Index Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk o2_m2_1 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk o2_m2_2 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk o2_m2_3 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk o2_m1_1 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk o2_m1_2 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk o2_m1_3 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk o2_m1_4 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk o2_m1_5 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Materialize + -> Nested Loop + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 o1_m2 + Order: o1_m2."time" + -> Index Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk o1_m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk o1_m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk o1_m1_1 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk o1_m1_2 + Index Cond: ("time" = o1_m2."time") 
+ Filter: (device_id = 1) + -> Index Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk o1_m1_3 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk o1_m1_4 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk o1_m1_5 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) +(58 rows) + +:PREFIX +SELECT * FROM ( +SELECT o1_m1.time FROM metrics_timestamptz o1_m1 INNER JOIN metrics_timestamptz_2 o1_m2 ON o1_m2.time = o1_m1.time AND o1_m1.device_id = o1_m2.device_id WHERE o1_m2.time < '2000-01-10' AND o1_m1.device_id = 1 +) o1 FULL OUTER JOIN ( +SELECT o2_m1.time FROM metrics_timestamptz o2_m1 FULL OUTER JOIN metrics_timestamptz_2 o2_m2 ON o2_m2.time = o2_m1.time AND o2_m1.device_id = o2_m2.device_id WHERE o2_m2.time > '2000-01-20' AND o2_m1.device_id = 2 +) o2 ON o1.time = o2.time ORDER BY 1,2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: o1_m1_1."time", o2_m1_1."time" + -> Merge Full Join + Merge Cond: (o2_m1_1."time" = o1_m1_1."time") + -> Nested Loop + -> Merge Append + Sort Key: o2_m2_1."time" + -> Index Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk o2_m2_1 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk o2_m2_2 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk o2_m2_3 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk o2_m1_1 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk o2_m1_2 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk o2_m1_3 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk o2_m1_4 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk o2_m1_5 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Materialize + -> Nested Loop + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 o1_m2 + Order: o1_m2."time" + -> Index Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk o1_m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk o1_m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk o1_m1_1 + Index 
Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk o1_m1_2 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk o1_m1_3 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk o1_m1_4 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk o1_m1_5 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) +(58 rows) + +\ir include/plan_expand_hypertable_chunks_in_query.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +--we want to see how our logic excludes chunks +--and not how much work constraint_exclusion does +SET constraint_exclusion = 'off'; +:PREFIX SELECT * FROM hyper ORDER BY value; + QUERY PLAN +-------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + -> Seq Scan on _hyper_1_2_chunk + -> Seq Scan on _hyper_1_3_chunk + -> Seq Scan on _hyper_1_4_chunk + -> Seq Scan on _hyper_1_5_chunk + -> Seq Scan on _hyper_1_6_chunk + -> Seq Scan on _hyper_1_7_chunk + -> Seq Scan on _hyper_1_8_chunk + -> Seq Scan on _hyper_1_9_chunk + -> Seq Scan on _hyper_1_10_chunk + -> Seq Scan on _hyper_1_11_chunk + -> Seq Scan on _hyper_1_12_chunk + -> Seq Scan on _hyper_1_13_chunk + -> Seq Scan on _hyper_1_14_chunk + -> Seq Scan on _hyper_1_15_chunk + -> Seq Scan on _hyper_1_16_chunk + -> Seq Scan on _hyper_1_17_chunk + -> Seq Scan on _hyper_1_18_chunk + -> Seq Scan on _hyper_1_19_chunk + -> Seq Scan on _hyper_1_20_chunk + -> Seq Scan on _hyper_1_21_chunk + -> Seq Scan on _hyper_1_22_chunk + -> Seq Scan on _hyper_1_23_chunk + -> Seq Scan on _hyper_1_24_chunk + -> Seq Scan on _hyper_1_25_chunk + -> Seq Scan on _hyper_1_26_chunk + -> Seq Scan on _hyper_1_27_chunk + -> Seq Scan on _hyper_1_28_chunk + -> Seq Scan on _hyper_1_29_chunk + -> Seq Scan on _hyper_1_30_chunk + -> Seq Scan on _hyper_1_31_chunk + -> Seq Scan on _hyper_1_32_chunk + -> Seq Scan on _hyper_1_33_chunk + -> Seq Scan on _hyper_1_34_chunk + -> Seq Scan on _hyper_1_35_chunk + -> Seq Scan on _hyper_1_36_chunk + -> Seq Scan on _hyper_1_37_chunk + -> Seq Scan on _hyper_1_38_chunk + -> Seq Scan on _hyper_1_39_chunk + -> Seq Scan on _hyper_1_40_chunk + -> Seq Scan on _hyper_1_41_chunk + -> Seq Scan on _hyper_1_42_chunk + -> Seq Scan on _hyper_1_43_chunk + -> Seq Scan on _hyper_1_44_chunk + -> Seq Scan on _hyper_1_45_chunk + -> Seq Scan on _hyper_1_46_chunk + -> Seq Scan on _hyper_1_47_chunk + -> Seq Scan on _hyper_1_48_chunk + -> Seq Scan on _hyper_1_49_chunk + -> Seq Scan on _hyper_1_50_chunk + -> Seq Scan on _hyper_1_51_chunk + -> Seq Scan on _hyper_1_52_chunk + -> Seq Scan on _hyper_1_53_chunk + -> Seq Scan on _hyper_1_54_chunk + -> Seq Scan on _hyper_1_55_chunk + -> Seq Scan on _hyper_1_56_chunk + -> Seq Scan on _hyper_1_57_chunk + -> Seq Scan on _hyper_1_58_chunk + -> Seq Scan on _hyper_1_59_chunk + -> Seq Scan on _hyper_1_60_chunk + -> Seq Scan on _hyper_1_61_chunk + -> Seq Scan on _hyper_1_62_chunk + -> Seq Scan on _hyper_1_63_chunk + -> Seq Scan on _hyper_1_64_chunk + -> Seq Scan on _hyper_1_65_chunk + -> Seq Scan on 
_hyper_1_66_chunk + -> Seq Scan on _hyper_1_67_chunk + -> Seq Scan on _hyper_1_68_chunk + -> Seq Scan on _hyper_1_69_chunk + -> Seq Scan on _hyper_1_70_chunk + -> Seq Scan on _hyper_1_71_chunk + -> Seq Scan on _hyper_1_72_chunk + -> Seq Scan on _hyper_1_73_chunk + -> Seq Scan on _hyper_1_74_chunk + -> Seq Scan on _hyper_1_75_chunk + -> Seq Scan on _hyper_1_76_chunk + -> Seq Scan on _hyper_1_77_chunk + -> Seq Scan on _hyper_1_78_chunk + -> Seq Scan on _hyper_1_79_chunk + -> Seq Scan on _hyper_1_80_chunk + -> Seq Scan on _hyper_1_81_chunk + -> Seq Scan on _hyper_1_82_chunk + -> Seq Scan on _hyper_1_83_chunk + -> Seq Scan on _hyper_1_84_chunk + -> Seq Scan on _hyper_1_85_chunk + -> Seq Scan on _hyper_1_86_chunk + -> Seq Scan on _hyper_1_87_chunk + -> Seq Scan on _hyper_1_88_chunk + -> Seq Scan on _hyper_1_89_chunk + -> Seq Scan on _hyper_1_90_chunk + -> Seq Scan on _hyper_1_91_chunk + -> Seq Scan on _hyper_1_92_chunk + -> Seq Scan on _hyper_1_93_chunk + -> Seq Scan on _hyper_1_94_chunk + -> Seq Scan on _hyper_1_95_chunk + -> Seq Scan on _hyper_1_96_chunk + -> Seq Scan on _hyper_1_97_chunk + -> Seq Scan on _hyper_1_98_chunk + -> Seq Scan on _hyper_1_99_chunk + -> Seq Scan on _hyper_1_100_chunk + -> Seq Scan on _hyper_1_101_chunk + -> Seq Scan on _hyper_1_102_chunk +(105 rows) + +-- explicit chunk exclusion +:PREFIX SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[1,2]) ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + -> Seq Scan on _hyper_1_2_chunk +(5 rows) + +:PREFIX SELECT * FROM (SELECT * FROM hyper h WHERE _timescaledb_functions.chunks_in(h, ARRAY[1,2,3])) T ORDER BY value; + QUERY PLAN +---------------------------------------------- + Sort + Sort Key: h_1.value + -> Append + -> Seq Scan on _hyper_1_1_chunk h_1 + -> Seq Scan on _hyper_1_2_chunk h_2 + -> Seq Scan on _hyper_1_3_chunk h_3 +(6 rows) + +:PREFIX SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[1,2,3]) AND time < 10 ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_1_2_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_1_3_chunk + Filter: ("time" < 10) +(9 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE device_id = 'dev1' AND time < to_timestamp(10) AND _timescaledb_functions.chunks_in(hyper_ts, ARRAY[116]) ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(4 rows) + +:PREFIX SELECT * FROM hyper_ts h JOIN tag on (h.tag_id = tag.id ) WHERE _timescaledb_functions.chunks_in(h, ARRAY[116]) AND time < to_timestamp(10) AND device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: h.value + -> Merge Join + Merge Cond: (tag.id = h.tag_id) + -> Index Scan using tag_pkey on tag + -> Sort + Sort Key: h.tag_id + -> Seq Scan on _hyper_3_116_chunk h + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(9 rows) + +:PREFIX SELECT * FROM 
hyper_w_space h1 JOIN hyper_ts h2 ON h1.device_id=h2.device_id WHERE _timescaledb_functions.chunks_in(h1, ARRAY[104,105]) AND _timescaledb_functions.chunks_in(h2, ARRAY[116,117]) ORDER BY h1.value; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: h1_1.value + -> Hash Join + Hash Cond: (h2_1.device_id = h1_1.device_id) + -> Append + -> Seq Scan on _hyper_3_116_chunk h2_1 + -> Seq Scan on _hyper_3_117_chunk h2_2 + -> Hash + -> Append + -> Seq Scan on _hyper_2_104_chunk h1_1 + -> Seq Scan on _hyper_2_105_chunk h1_2 +(11 rows) + +:PREFIX SELECT * FROM hyper_w_space h1 JOIN hyper_ts h2 ON h1.device_id=h2.device_id AND _timescaledb_functions.chunks_in(h2, ARRAY[116,117]) WHERE _timescaledb_functions.chunks_in(h1, ARRAY[104,105]) ORDER BY h1.value; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: h1_1.value + -> Hash Join + Hash Cond: (h2_1.device_id = h1_1.device_id) + -> Append + -> Seq Scan on _hyper_3_116_chunk h2_1 + -> Seq Scan on _hyper_3_117_chunk h2_2 + -> Hash + -> Append + -> Seq Scan on _hyper_2_104_chunk h1_1 + -> Seq Scan on _hyper_2_105_chunk h1_2 +(11 rows) + +:PREFIX SELECT * FROM hyper h1, hyper h2 WHERE _timescaledb_functions.chunks_in(h1, ARRAY[1,2]) AND _timescaledb_functions.chunks_in(h2, ARRAY[2,3]); + QUERY PLAN +----------------------------------------------------- + Nested Loop + -> Append + -> Seq Scan on _hyper_1_1_chunk h1_1 + -> Seq Scan on _hyper_1_2_chunk h1_2 + -> Materialize + -> Append + -> Seq Scan on _hyper_1_2_chunk h2_1 + -> Seq Scan on _hyper_1_3_chunk h2_2 +(8 rows) + +SET enable_seqscan=false; +-- Should perform index-only scan. Since we pass whole row into the function it might block planner from using index-only scan. +-- But since we'll remove the function from the query tree before planner decision it shouldn't affect index-only decision. 
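The fixtures above and below exercise explicit chunk exclusion through _timescaledb_functions.chunks_in. As a rough sketch of the pattern being tested (the table name and chunk ids are illustrative assumptions that mirror the fixtures rather than quote them), the caller passes the hypertable's whole-row reference plus an array of chunk ids:

-- illustrative sketch only, not part of the regression output above
EXPLAIN (costs off)
SELECT time
FROM hyper h
WHERE _timescaledb_functions.chunks_in(h, ARRAY[1, 2])
  AND time = 0;
-- chunks_in() is stripped from the query tree before planning, so only the
-- listed chunks are expanded and an index-only scan on the time index is
-- still possible despite the whole-row argument.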
+:PREFIX SELECT time FROM hyper WHERE time=0 AND _timescaledb_functions.chunks_in(hyper, ARRAY[1]); + QUERY PLAN +--------------------------------------------------------------------------- + Index Only Scan using _hyper_1_1_chunk_hyper_time_idx on _hyper_1_1_chunk + Index Cond: ("time" = 0) +(2 rows) + +:PREFIX SELECT first(value, time) FROM hyper h WHERE _timescaledb_functions.chunks_in(h, ARRAY[1]); + QUERY PLAN +----------------------------------------------------------------------------------------------- + Result + InitPlan 1 (returns $0) + -> Limit + -> Index Scan Backward using _hyper_1_1_chunk_hyper_time_idx on _hyper_1_1_chunk h + Index Cond: ("time" IS NOT NULL) +(5 rows) + +\set ON_ERROR_STOP 0 +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[1,2]) AND _timescaledb_functions.chunks_in(hyper, ARRAY[2,3]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:26: ERROR: illegal invocation of chunks_in function +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(2, ARRAY[1]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:27: ERROR: function _timescaledb_functions.chunks_in(integer, integer[]) does not exist at character 27 +SELECT * FROM hyper WHERE time < 10 OR _timescaledb_functions.chunks_in(hyper, ARRAY[1,2]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:28: ERROR: illegal invocation of chunks_in function +SELECT _timescaledb_functions.chunks_in(hyper, ARRAY[1,2]) FROM hyper; +psql:include/plan_expand_hypertable_chunks_in_query.sql:29: ERROR: illegal invocation of chunks_in function +-- non existing chunk id +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[123456789]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:31: ERROR: chunk id 123456789 not found +-- chunk that belongs to another hypertable +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[104]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:33: ERROR: chunk id 104 does not belong to hypertable "hyper" +-- passing wrong row ref +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(ROW(1,2), ARRAY[104]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:35: ERROR: first parameter for chunks_in function needs to be record +-- passing func as chunk id +SELECT * FROM hyper h WHERE _timescaledb_functions.chunks_in(h, array_append(ARRAY[1],current_setting('server_version_num')::int)); +psql:include/plan_expand_hypertable_chunks_in_query.sql:37: ERROR: second argument to chunk_in should contain only integer consts +-- NULL chunk IDs not allowed in chunk array +SELECT * FROM hyper h WHERE _timescaledb_functions.chunks_in(h, ARRAY[NULL::int]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:39: ERROR: chunk id can't be NULL +\set ON_ERROR_STOP 1 +-- chunks_in is STRICT function and for NULL arguments a null result is returned +SELECT * FROM hyper h WHERE _timescaledb_functions.chunks_in(h, NULL); + value | time +-------+------ +(0 rows) + +\set ECHO errors +RESET timescaledb.enable_optimizations; +CREATE TABLE t(time timestamptz NOT NULL); +SELECT table_name FROM create_hypertable('t','time'); + table_name +------------ + t +(1 row) + +INSERT INTO t VALUES ('2000-01-01'), ('2010-01-01'), ('2020-01-01'); +EXPLAIN (costs off) SELECT * FROM t t1 INNER JOIN t t2 ON t1.time = t2.time WHERE t1.time < timestamptz '2010-01-01'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (t1_1."time" = 
t2_1."time") + -> Merge Append + Sort Key: t1_1."time" + -> Index Only Scan Backward using _hyper_15_182_chunk_t_time_idx on _hyper_15_182_chunk t1_1 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_15_183_chunk_t_time_idx on _hyper_15_183_chunk t1_2 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Materialize + -> Merge Append + Sort Key: t2_1."time" + -> Index Only Scan Backward using _hyper_15_182_chunk_t_time_idx on _hyper_15_182_chunk t2_1 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_15_183_chunk_t_time_idx on _hyper_15_183_chunk t2_2 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) +(15 rows) + +SET timescaledb.enable_qual_propagation TO false; +EXPLAIN (costs off) SELECT * FROM t t1 INNER JOIN t t2 ON t1.time = t2.time WHERE t1.time < timestamptz '2010-01-01'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (t1_1."time" = t2_1."time") + -> Merge Append + Sort Key: t1_1."time" + -> Index Only Scan Backward using _hyper_15_182_chunk_t_time_idx on _hyper_15_182_chunk t1_1 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_15_183_chunk_t_time_idx on _hyper_15_183_chunk t1_2 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Materialize + -> Merge Append + Sort Key: t2_1."time" + -> Index Only Scan Backward using _hyper_15_182_chunk_t_time_idx on _hyper_15_182_chunk t2_1 + -> Index Only Scan Backward using _hyper_15_183_chunk_t_time_idx on _hyper_15_183_chunk t2_2 + -> Index Only Scan Backward using _hyper_15_184_chunk_t_time_idx on _hyper_15_184_chunk t2_3 +(14 rows) + +RESET timescaledb.enable_qual_propagation; +CREATE TABLE test (a int, time timestamptz NOT NULL); +SELECT table_name FROM create_hypertable('public.test', 'time'); + table_name +------------ + test +(1 row) + +INSERT INTO test SELECT i, '2020-04-01'::date-10-i from generate_series(1,20) i; +CREATE OR REPLACE FUNCTION test_f(_ts timestamptz) +RETURNS SETOF test LANGUAGE SQL STABLE PARALLEL SAFE +AS $f$ + SELECT DISTINCT ON (a) * FROM test WHERE time >= _ts ORDER BY a, time DESC +$f$; +EXPLAIN (costs off) SELECT * FROM test_f(now()); + QUERY PLAN +------------------------------------------------- + Unique + -> Sort + Sort Key: test.a, test."time" DESC + -> Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 4 +(5 rows) + +EXPLAIN (costs off) SELECT * FROM test_f(now()); + QUERY PLAN +------------------------------------------------- + Unique + -> Sort + Sort Key: test.a, test."time" DESC + -> Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 4 +(5 rows) + +CREATE TABLE t1 (a int, b int NOT NULL); +SELECT create_hypertable('t1', 'b', chunk_time_interval=>10); + create_hypertable +------------------- + (17,public,t1,t) +(1 row) + +CREATE TABLE t2 (a int, b int NOT NULL); +SELECT create_hypertable('t2', 'b', chunk_time_interval=>10); + create_hypertable +------------------- + (18,public,t2,t) +(1 row) + +CREATE OR REPLACE FUNCTION f_t1(_a int, _b int) + RETURNS SETOF t1 + LANGUAGE SQL + STABLE PARALLEL SAFE +AS $function$ + SELECT DISTINCT ON (a) * FROM t1 WHERE a = _a and b = _b ORDER BY a, b DESC +$function$ +; +CREATE OR REPLACE FUNCTION f_t2(_a int, _b int) 
RETURNS SETOF t2 LANGUAGE sql STABLE PARALLEL SAFE +AS $function$ + SELECT DISTINCT ON (j.a) j.* + FROM + f_t1(_a, _b) sc, + t2 j + WHERE + j.b = _b AND + j.a = _a + ORDER BY j.a, j.b DESC +$function$ +; +CREATE OR REPLACE FUNCTION f_t1_2(_b int) RETURNS SETOF t1 LANGUAGE SQL STABLE PARALLEL SAFE +AS $function$ + SELECT DISTINCT ON (j.a) jt.* FROM t1 j, f_t1(j.a, _b) jt +$function$; +EXPLAIN (costs off) SELECT * FROM f_t1_2(10); + QUERY PLAN +--------------------------------------------------------------- + Subquery Scan on f_t1_2 + -> Unique + -> Sort + Sort Key: j.a + -> Nested Loop + -> Seq Scan on t1 j + -> Unique + -> Index Scan using t1_b_idx on t1 + Index Cond: (b = 10) + Filter: (a = j.a) +(10 rows) + +EXPLAIN (costs off) SELECT * FROM f_t1_2(10) sc, f_t2(sc.a, 10); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Unique + -> Sort + Sort Key: j.a + -> Nested Loop + -> Seq Scan on t1 j + -> Unique + -> Index Scan using t1_b_idx on t1 + Index Cond: (b = 10) + Filter: (a = j.a) + -> Unique + -> Nested Loop + -> Unique + -> Index Scan using t1_b_idx on t1 t1_1 + Index Cond: (b = 10) + Filter: (a = t1.a) + -> Index Scan using t2_b_idx on t2 j_1 + Index Cond: (b = 10) + Filter: (a = t1.a) +(19 rows) + +--TEST END-- diff --git a/test/expected/plan_expand_hypertable-16.out b/test/expected/plan_expand_hypertable-16.out new file mode 100644 index 00000000000..ae5018f2a23 --- /dev/null +++ b/test/expected/plan_expand_hypertable-16.out @@ -0,0 +1,3024 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\set PREFIX 'EXPLAIN (costs off) ' +\ir include/plan_expand_hypertable_load.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
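The test_f(now()) plans shown above rely on startup-time chunk exclusion: now() is STABLE rather than IMMUTABLE, so chunks cannot be pruned while the plan is built, and ChunkAppend discards them when execution starts instead. A minimal sketch of the same behaviour, using a hypothetical table and data purely for illustration:

-- hypothetical example; any hypertable whose chunks lie outside the qual behaves the same
CREATE TABLE readings (time timestamptz NOT NULL, value float);
SELECT create_hypertable('readings', 'time');
INSERT INTO readings
SELECT t, 1.0
FROM generate_series('2000-01-01'::timestamptz, '2000-03-01'::timestamptz, '1 day'::interval) t;
-- now() is STABLE, so exclusion happens at executor startup, not at plan time
EXPLAIN (costs off)
SELECT * FROM readings WHERE time > now();
-- expect a Custom Scan (ChunkAppend) node reporting "Chunks excluded during startup: N"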
+--single time dimension +CREATE TABLE hyper ("time_broken" bigint NOT NULL, "value" integer); +ALTER TABLE hyper +DROP COLUMN time_broken, +ADD COLUMN time BIGINT; +SELECT create_hypertable('hyper', 'time', chunk_time_interval => 10); +psql:include/plan_expand_hypertable_load.sql:12: NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------- + (1,public,hyper,t) +(1 row) + +INSERT INTO hyper SELECT g, g FROM generate_series(0,1000) g; +--insert a point with INT_MAX_64 +INSERT INTO hyper (time, value) SELECT 9223372036854775807::bigint, 0; +--time and space +CREATE TABLE hyper_w_space ("time_broken" bigint NOT NULL, "device_id" text, "value" integer); +ALTER TABLE hyper_w_space +DROP COLUMN time_broken, +ADD COLUMN time BIGINT; +SELECT create_hypertable('hyper_w_space', 'time', 'device_id', 4, chunk_time_interval => 10); +psql:include/plan_expand_hypertable_load.sql:26: NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (2,public,hyper_w_space,t) +(1 row) + +INSERT INTO hyper_w_space (time, device_id, value) SELECT g, 'dev' || g, g FROM generate_series(0,30) g; +CREATE VIEW hyper_w_space_view AS (SELECT * FROM hyper_w_space); +--with timestamp and space +CREATE TABLE tag (id serial PRIMARY KEY, name text); +CREATE TABLE hyper_ts ("time_broken" timestamptz NOT NULL, "device_id" text, tag_id INT REFERENCES tag(id), "value" integer); +ALTER TABLE hyper_ts +DROP COLUMN time_broken, +ADD COLUMN time TIMESTAMPTZ; +SELECT create_hypertable('hyper_ts', 'time', 'device_id', 2, chunk_time_interval => '10 seconds'::interval); +psql:include/plan_expand_hypertable_load.sql:41: NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------- + (3,public,hyper_ts,t) +(1 row) + +INSERT INTO tag(name) SELECT 'tag'||g FROM generate_series(0,10) g; +INSERT INTO hyper_ts (time, device_id, tag_id, value) SELECT to_timestamp(g), 'dev' || g, (random() /10)+1, g FROM generate_series(0,30) g; +--one in the future +INSERT INTO hyper_ts (time, device_id, tag_id, value) VALUES ('2100-01-01 02:03:04 PST', 'dev101', 1, 0); +--time partitioning function +CREATE OR REPLACE FUNCTION unix_to_timestamp(unixtime float8) + RETURNS TIMESTAMPTZ LANGUAGE SQL IMMUTABLE PARALLEL SAFE STRICT AS +$BODY$ + SELECT to_timestamp(unixtime); +$BODY$; +CREATE TABLE hyper_timefunc ("time" float8 NOT NULL, "device_id" text, "value" integer); +SELECT create_hypertable('hyper_timefunc', 'time', 'device_id', 4, chunk_time_interval => 10, time_partitioning_func => 'unix_to_timestamp'); +psql:include/plan_expand_hypertable_load.sql:57: WARNING: unexpected interval: smaller than one second + create_hypertable +----------------------------- + (4,public,hyper_timefunc,t) +(1 row) + +INSERT INTO hyper_timefunc (time, device_id, value) SELECT g, 'dev' || g, g FROM generate_series(0,30) g; +CREATE TABLE metrics_timestamp(time timestamp); +SELECT create_hypertable('metrics_timestamp','time'); +psql:include/plan_expand_hypertable_load.sql:62: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +psql:include/plan_expand_hypertable_load.sql:62: NOTICE: adding not-null constraint to column "time" + create_hypertable +-------------------------------- + (5,public,metrics_timestamp,t) +(1 row) + +INSERT INTO metrics_timestamp SELECT generate_series('2000-01-01'::timestamp,'2000-02-01'::timestamp,'1d'::interval); +CREATE TABLE metrics_timestamptz(time timestamptz, device_id int); +SELECT 
create_hypertable('metrics_timestamptz','time'); +psql:include/plan_expand_hypertable_load.sql:66: NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------- + (6,public,metrics_timestamptz,t) +(1 row) + +INSERT INTO metrics_timestamptz SELECT generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval), 1; +INSERT INTO metrics_timestamptz SELECT generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval), 2; +INSERT INTO metrics_timestamptz SELECT generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval), 3; +--create a second table to test joins with +CREATE TABLE metrics_timestamptz_2 (LIKE metrics_timestamptz); +SELECT create_hypertable('metrics_timestamptz_2','time'); + create_hypertable +------------------------------------ + (7,public,metrics_timestamptz_2,t) +(1 row) + +INSERT INTO metrics_timestamptz_2 +SELECT * FROM metrics_timestamptz; +INSERT INTO metrics_timestamptz_2 VALUES ('2000-12-01'::timestamptz, 3); +CREATE TABLE metrics_date(time date); +SELECT create_hypertable('metrics_date','time'); +psql:include/plan_expand_hypertable_load.sql:79: NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------- + (8,public,metrics_date,t) +(1 row) + +INSERT INTO metrics_date SELECT generate_series('2000-01-01'::date,'2000-02-01'::date,'1d'::interval); +ANALYZE hyper; +ANALYZE hyper_w_space; +ANALYZE tag; +ANALYZE hyper_ts; +ANALYZE hyper_timefunc; +-- create normal table for JOIN tests +CREATE TABLE regular_timestamptz(time timestamptz); +INSERT INTO regular_timestamptz SELECT generate_series('2000-01-01'::timestamptz,'2000-02-01'::timestamptz,'1d'::interval); +\ir include/plan_expand_hypertable_query.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+--we want to see how our logic excludes chunks +--and not how much work constraint_exclusion does +SET constraint_exclusion = 'off'; +\qecho test upper bounds +test upper bounds +:PREFIX SELECT * FROM hyper WHERE time < 10 ORDER BY value; + QUERY PLAN +------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time < 11 ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 11) + -> Seq Scan on _hyper_1_2_chunk + Filter: ("time" < 11) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time = 10 ORDER BY value; + QUERY PLAN +------------------------------------ + Sort + Sort Key: _hyper_1_2_chunk.value + -> Seq Scan on _hyper_1_2_chunk + Filter: ("time" = 10) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE 10 >= time ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (10 >= "time") + -> Seq Scan on _hyper_1_2_chunk + Filter: (10 >= "time") +(7 rows) + +\qecho test lower bounds +test lower bounds +:PREFIX SELECT * FROM hyper WHERE time >= 10 and time < 20 ORDER BY value; + QUERY PLAN +---------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk.value + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" >= 10) AND ("time" < 20)) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE 10 < time and 20 >= time ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk.value + -> Append + -> Seq Scan on _hyper_1_2_chunk + Filter: ((10 < "time") AND (20 >= "time")) + -> Seq Scan on _hyper_1_3_chunk + Filter: ((10 < "time") AND (20 >= "time")) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time >= 9 and time < 20 ORDER BY value; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 9) AND ("time" < 20)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" >= 9) AND ("time" < 20)) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time > 9 and time < 20 ORDER BY value; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk.value + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 9) AND ("time" < 20)) +(4 rows) + +\qecho test empty result +test empty result +:PREFIX SELECT * FROM hyper WHERE time < 0; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +\qecho test expression evaluation +test expression evaluation +:PREFIX SELECT * FROM hyper WHERE time < (5*2)::smallint; + QUERY PLAN +------------------------------------- + Seq Scan on _hyper_1_1_chunk + Filter: ("time" < '10'::smallint) +(2 rows) + +\qecho test logic at INT64_MAX +test logic at INT64_MAX +:PREFIX SELECT * FROM hyper WHERE time = 9223372036854775807::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_1_102_chunk.value + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" = '9223372036854775807'::bigint) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time = 9223372036854775806::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_1_102_chunk.value 
+ -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" = '9223372036854775806'::bigint) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time >= 9223372036854775807::bigint ORDER BY value; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: _hyper_1_102_chunk.value + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" >= '9223372036854775807'::bigint) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time > 9223372036854775807::bigint ORDER BY value; + QUERY PLAN +-------------------------------- + Sort + Sort Key: value + -> Result + One-Time Filter: false +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time > 9223372036854775806::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_1_102_chunk.value + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" > '9223372036854775806'::bigint) +(4 rows) + +\qecho cte +cte +:PREFIX WITH cte AS( + SELECT * FROM hyper WHERE time < 10 +) +SELECT * FROM cte ORDER BY value; + QUERY PLAN +------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) +(4 rows) + +\qecho subquery +subquery +:PREFIX SELECT 0 = ANY (SELECT value FROM hyper WHERE time < 10); + QUERY PLAN +-------------------------------------- + Result + SubPlan 1 + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) +(4 rows) + +\qecho no space constraint +no space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 ORDER BY value; + QUERY PLAN +-------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < 10) +(11 rows) + +\qecho valid space constraint +valid space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and device_id = 'dev5' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and 'dev5' = device_id ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND ('dev5'::text = device_id)) +(4 rows) + +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and 'dev'||(2+3) = device_id ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND ('dev5'::text = device_id)) +(4 rows) + +\qecho only space constraint +only space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE 'dev5' = device_id ORDER BY value; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Append + -> Seq Scan on _hyper_2_106_chunk + Filter: ('dev5'::text = device_id) + -> Seq Scan on _hyper_2_109_chunk + Filter: ('dev5'::text = device_id) + -> Seq Scan on _hyper_2_111_chunk + Filter: ('dev5'::text = device_id) +(9 rows) + +\qecho unhandled space constraint +unhandled space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and device_id > 
'dev5' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) +(11 rows) + +\qecho use of OR - does not filter chunks +use of OR - does not filter chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND (device_id = 'dev5' or device_id = 'dev6') ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) +(11 rows) + +\qecho cte +cte +:PREFIX WITH cte AS( + SELECT * FROM hyper_w_space WHERE time < 10 and device_id = 'dev5' +) +SELECT * FROM cte ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho subquery +subquery +:PREFIX SELECT 0 = ANY (SELECT value FROM hyper_w_space WHERE time < 10 and device_id = 'dev5'); + QUERY PLAN +------------------------------------------------------------------ + Result + SubPlan 1 + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho view +view +:PREFIX SELECT * FROM hyper_w_space_view WHERE time < 10 and device_id = 'dev5' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho IN statement - simple +IN statement - simple +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id IN ('dev5') ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho IN statement - two chunks +IN statement - two chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id IN ('dev5','dev6') ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_105_chunk.value + -> Append + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[]))) +(7 rows) + +\qecho IN statement - one chunk +IN statement - one chunk +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id IN ('dev4','dev5') ORDER 
BY value; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev4,dev5}'::text[]))) +(4 rows) + +\qecho NOT IN - does not filter chunks +NOT IN - does not filter chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id NOT IN ('dev5','dev6') ORDER BY value; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND (device_id <> ALL ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND (device_id <> ALL ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id <> ALL ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id <> ALL ('{dev5,dev6}'::text[]))) +(11 rows) + +\qecho IN statement with subquery - does not filter chunks +IN statement with subquery - does not filter chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id IN (SELECT 'dev5'::text) ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: _hyper_2_106_chunk.value + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(4 rows) + +\qecho ANY +ANY +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id = ANY(ARRAY['dev5','dev6']) ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_105_chunk.value + -> Append + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[]))) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[]))) +(7 rows) + +\qecho ANY with intersection +ANY with intersection +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id = ANY(ARRAY['dev5','dev6']) AND device_id = ANY(ARRAY['dev6','dev7']) ORDER BY value; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_105_chunk.value + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND (device_id = ANY ('{dev5,dev6}'::text[])) AND (device_id = ANY ('{dev6,dev7}'::text[]))) +(4 rows) + +\qecho ANY without intersection shouldnt scan any chunks +ANY without intersection shouldnt scan any chunks +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND device_id = ANY(ARRAY['dev5','dev6']) AND device_id = ANY(ARRAY['dev8','dev9']) ORDER BY value; + QUERY PLAN +-------------------------------- + Sort + Sort Key: value + -> Result + One-Time Filter: false +(4 rows) + +\qecho ANY/IN/ALL only works for equals operator +ANY/IN/ALL only works for equals operator +:PREFIX SELECT * FROM hyper_w_space WHERE device_id < ANY(ARRAY['dev5','dev6']) ORDER BY value; + QUERY PLAN +----------------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq 
Scan on _hyper_2_106_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_107_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_108_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_109_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_110_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_111_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_112_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_113_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_114_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) + -> Seq Scan on _hyper_2_115_chunk + Filter: (device_id < ANY ('{dev5,dev6}'::text[])) +(29 rows) + +\qecho ALL with equals and different values shouldnt scan any chunks +ALL with equals and different values shouldnt scan any chunks +:PREFIX SELECT * FROM hyper_w_space WHERE device_id = ALL(ARRAY['dev5','dev6']) ORDER BY value; + QUERY PLAN +-------------------------------- + Sort + Sort Key: value + -> Result + One-Time Filter: false +(4 rows) + +\qecho Multi AND +Multi AND +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND time < 100 ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND ("time" < 100)) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND ("time" < 100)) + -> Seq Scan on _hyper_2_105_chunk + Filter: (("time" < 10) AND ("time" < 100)) + -> Seq Scan on _hyper_2_106_chunk + Filter: (("time" < 10) AND ("time" < 100)) +(11 rows) + +\qecho Time dimension doesnt filter chunks when using IN/ANY with multiple arguments +Time dimension doesnt filter chunks when using IN/ANY with multiple arguments +:PREFIX SELECT * FROM hyper_w_space WHERE time < ANY(ARRAY[1,2]) ORDER BY value; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_107_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_108_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_109_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_110_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_111_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_112_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_113_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_114_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) + -> Seq Scan on _hyper_2_115_chunk + Filter: ("time" < ANY ('{1,2}'::integer[])) +(29 rows) + +\qecho Time dimension chunk filtering works for ANY with single argument +Time dimension chunk filtering works for ANY with single argument +:PREFIX SELECT * FROM hyper_w_space WHERE time < ANY(ARRAY[1]) ORDER BY value; + QUERY PLAN 
+--------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < ANY ('{1}'::integer[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < ANY ('{1}'::integer[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < ANY ('{1}'::integer[])) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < ANY ('{1}'::integer[])) +(11 rows) + +\qecho Time dimension chunk filtering works for ALL with single argument +Time dimension chunk filtering works for ALL with single argument +:PREFIX SELECT * FROM hyper_w_space WHERE time < ALL(ARRAY[1]) ORDER BY value; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < ALL ('{1}'::integer[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < ALL ('{1}'::integer[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < ALL ('{1}'::integer[])) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < ALL ('{1}'::integer[])) +(11 rows) + +\qecho Time dimension chunk filtering works for ALL with multiple arguments +Time dimension chunk filtering works for ALL with multiple arguments +:PREFIX SELECT * FROM hyper_w_space WHERE time < ALL(ARRAY[1,10,20,30]) ORDER BY value; + QUERY PLAN +------------------------------------------------------------------ + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < ALL ('{1,10,20,30}'::integer[])) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < ALL ('{1,10,20,30}'::integer[])) + -> Seq Scan on _hyper_2_105_chunk + Filter: ("time" < ALL ('{1,10,20,30}'::integer[])) + -> Seq Scan on _hyper_2_106_chunk + Filter: ("time" < ALL ('{1,10,20,30}'::integer[])) +(11 rows) + +\qecho AND intersection using IN and EQUALS +AND intersection using IN and EQUALS +:PREFIX SELECT * FROM hyper_w_space WHERE device_id IN ('dev1','dev2') AND device_id = 'dev1' ORDER BY value; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_2_103_chunk.value + -> Append + -> Seq Scan on _hyper_2_103_chunk + Filter: ((device_id = ANY ('{dev1,dev2}'::text[])) AND (device_id = 'dev1'::text)) + -> Seq Scan on _hyper_2_110_chunk + Filter: ((device_id = ANY ('{dev1,dev2}'::text[])) AND (device_id = 'dev1'::text)) + -> Seq Scan on _hyper_2_114_chunk + Filter: ((device_id = ANY ('{dev1,dev2}'::text[])) AND (device_id = 'dev1'::text)) +(9 rows) + +\qecho AND with no intersection using IN and EQUALS +AND with no intersection using IN and EQUALS +:PREFIX SELECT * FROM hyper_w_space WHERE device_id IN ('dev1','dev2') AND device_id = 'dev3' ORDER BY value; + QUERY PLAN +-------------------------------- + Sort + Sort Key: value + -> Result + One-Time Filter: false +(4 rows) + +\qecho timestamps +timestamps +\qecho these should work since they are immutable functions +these should work since they are immutable functions +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969 PST'::timestamptz ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Append + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 
PST'::timestamp with time zone) +(7 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < to_timestamp(10) ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Append + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) +(7 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969'::timestamp AT TIME ZONE 'PST' ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Append + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) +(7 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(4 rows) + +\qecho these should not work since uses stable functions; +these should not work since uses stable functions; +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969'::timestamp ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------ + Sort + Sort Key: hyper_ts.value + -> Custom Scan (ChunkAppend) on hyper_ts + Chunks excluded during startup: 6 + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) +(8 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < ('Wed Dec 31 16:00:10 1969'::timestamp::timestamptz) ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Custom Scan (ChunkAppend) on hyper_ts + Chunks excluded during startup: 6 + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) +(8 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE NOW() < time ORDER BY value; + QUERY PLAN +--------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Custom Scan (ChunkAppend) on hyper_ts + Chunks excluded during startup: 7 + -> Seq Scan on _hyper_3_123_chunk + Filter: (now() < "time") +(6 rows) + +\qecho joins +joins +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.id=1) and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Nested Loop Semi Join + -> Seq Scan on 
_hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text) AND (tag_id = 1) AND (tag_id = 1)) + -> Seq Scan on tag + Filter: (id = 1) +(7 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.id=1) or (time < to_timestamp(10) and device_id = 'dev1') ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Custom Scan (ChunkAppend) on hyper_ts + -> Seq Scan on _hyper_3_116_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + SubPlan 1 + -> Seq Scan on tag + Filter: (id = 1) + -> Seq Scan on _hyper_3_117_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_118_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_119_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_120_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_121_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_122_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_123_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) +(22 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.name='tag1') and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Nested Loop + Join Filter: (_hyper_3_116_chunk.tag_id = tag.id) + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) + -> Seq Scan on tag + Filter: (name = 'tag1'::text) +(8 rows) + +:PREFIX SELECT * FROM hyper_ts JOIN tag on (hyper_ts.tag_id = tag.id ) WHERE time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Merge Join + Merge Cond: (tag.id = _hyper_3_116_chunk.tag_id) + -> Index Scan using tag_pkey on tag + -> Sort + Sort Key: _hyper_3_116_chunk.tag_id + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(9 rows) + +:PREFIX SELECT * FROM hyper_ts JOIN tag on (hyper_ts.tag_id = tag.id ) WHERE tag.name = 'tag1' and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Nested Loop + Join Filter: (tag.id = _hyper_3_116_chunk.tag_id) + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) + -> Seq Scan on tag + Filter: (name = 'tag1'::text) +(8 rows) + +\qecho test constraint exclusion for constraints in ON clause of JOINs +test constraint exclusion for constraints in ON clause of JOINs +\qecho should exclude chunks on m1 and propagate qual to m2 because of INNER JOIN +should exclude chunks on m1 and propagate qual to m2 because of INNER JOIN +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +\qecho should exclude chunks on m2 and propagate qual to m1 because of INNER JOIN +should exclude chunks on m2 and propagate qual to m1 because of INNER JOIN +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +\qecho must not 
exclude on m1 +must not exclude on m1 +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + Join Filter: (m1."time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(19 rows) + +\qecho should exclude chunks on m2 +should exclude chunks on m2 +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + 
Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(24 rows) + +\qecho should exclude chunks on m1 +should exclude chunks on m1 +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: m1."time" + -> Merge Right Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(25 rows) + +\qecho must not exclude chunks on m2 +must not exclude chunks on m2 +:PREFIX SELECT m1.time,m2.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: m1."time" + -> Merge Left Join + Merge Cond: (m2."time" = m1."time") + Join Filter: (m2."time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Custom Scan (ChunkAppend) on 
metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 +(21 rows) + +\qecho time_bucket exclusion +time_bucket exclusion +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) < 10::bigint ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < '10'::bigint) AND (time_bucket('10'::bigint, "time") < '10'::bigint)) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) < 11::bigint ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < '21'::bigint) AND (time_bucket('10'::bigint, "time") < '11'::bigint)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" < '21'::bigint) AND (time_bucket('10'::bigint, "time") < '11'::bigint)) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" < '21'::bigint) AND (time_bucket('10'::bigint, "time") < '11'::bigint)) +(9 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) <= 10::bigint ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" <= '20'::bigint) AND (time_bucket('10'::bigint, "time") <= '10'::bigint)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" <= '20'::bigint) AND (time_bucket('10'::bigint, "time") <= '10'::bigint)) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" <= '20'::bigint) AND (time_bucket('10'::bigint, "time") <= '10'::bigint)) +(9 rows) + +:PREFIX SELECT * FROM hyper WHERE 10::bigint > time_bucket(10, time) ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < '10'::bigint) AND ('10'::bigint > time_bucket('10'::bigint, "time"))) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE 11::bigint > 
time_bucket(10, time) ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk."time" + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < '21'::bigint) AND ('11'::bigint > time_bucket('10'::bigint, "time"))) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" < '21'::bigint) AND ('11'::bigint > time_bucket('10'::bigint, "time"))) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" < '21'::bigint) AND ('11'::bigint > time_bucket('10'::bigint, "time"))) +(9 rows) + +\qecho test overflow behaviour of time_bucket exclusion +test overflow behaviour of time_bucket exclusion +:PREFIX SELECT * FROM hyper WHERE time > 950 AND time_bucket(10, time) < '9223372036854775807'::bigint ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_96_chunk."time" + -> Append + -> Seq Scan on _hyper_1_96_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_97_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_98_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_99_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_100_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_101_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) + -> Seq Scan on _hyper_1_102_chunk + Filter: (("time" > 950) AND (time_bucket('10'::bigint, "time") < '9223372036854775807'::bigint)) +(17 rows) + +\qecho test timestamp upper boundary +test timestamp upper boundary +\qecho there should be no transformation if we are out of the supported (TimescaleDB-specific) range +there should be no transformation if we are out of the supported (TimescaleDB-specific) range +:PREFIX SELECT * FROM metrics_timestamp WHERE time_bucket('1d',time) < '294276-01-01'::timestamp ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Custom Scan (ChunkAppend) on metrics_timestamp + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_5_155_chunk_metrics_timestamp_time_idx on _hyper_5_155_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_156_chunk_metrics_timestamp_time_idx on _hyper_5_156_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_157_chunk_metrics_timestamp_time_idx on _hyper_5_157_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_158_chunk_metrics_timestamp_time_idx on _hyper_5_158_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_159_chunk_metrics_timestamp_time_idx on _hyper_5_159_chunk + Filter: 
(time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) +(12 rows) + +\qecho transformation would be out of range +transformation would be out of range +:PREFIX SELECT * FROM metrics_timestamp WHERE time_bucket('1000d',time) < '294276-01-01'::timestamp ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_5_155_chunk_metrics_timestamp_time_idx on _hyper_5_155_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_156_chunk_metrics_timestamp_time_idx on _hyper_5_156_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_157_chunk_metrics_timestamp_time_idx on _hyper_5_157_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_158_chunk_metrics_timestamp_time_idx on _hyper_5_158_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) + -> Index Only Scan Backward using _hyper_5_159_chunk_metrics_timestamp_time_idx on _hyper_5_159_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276'::timestamp without time zone) +(12 rows) + +\qecho test timestamptz upper boundary +test timestamptz upper boundary +\qecho there should be no transformation if we are out of the supported (TimescaleDB-specific) range +there should be no transformation if we are out of the supported (TimescaleDB-specific) range +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1d',time) < '294276-01-01'::timestamptz ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) +(12 rows) + +\qecho transformation would be out of range +transformation would be out of range +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1000d',time) < 
'294276-01-01'::timestamptz ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk + Filter: (time_bucket('@ 1000 days'::interval, "time") < 'Sat Jan 01 00:00:00 294276 PST'::timestamp with time zone) +(12 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) > 10 AND time_bucket(10, time) < 100 ORDER BY time; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk."time" + -> Append + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_4_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_5_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_6_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_7_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_8_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_9_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_10_chunk + Filter: (("time" > 10) AND ("time" < '100'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 100)) +(21 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(10, time) > 10 AND time_bucket(10, time) < 20 ORDER BY time; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk."time" + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 10) AND ("time" < '20'::bigint) AND (time_bucket('10'::bigint, "time") > 10) AND (time_bucket('10'::bigint, "time") < 20)) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE time_bucket(1, time) > 11 AND time_bucket(1, time) < 19 ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk."time" + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 11) AND ("time" < '19'::bigint) AND (time_bucket('1'::bigint, "time") > 11) AND (time_bucket('1'::bigint, "time") < 19)) +(4 rows) + +:PREFIX SELECT * FROM hyper WHERE 10 < time_bucket(10, time) AND 20 > time_bucket(10,time) ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_1_2_chunk."time" + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 10) AND ("time" < '20'::bigint) AND (10 < time_bucket('10'::bigint, "time")) AND (20 > time_bucket('10'::bigint, "time"))) +(4 rows) + +\qecho time_bucket exclusion with date +time_bucket exclusion with date +:PREFIX SELECT * FROM metrics_date WHERE time_bucket('1d',time) < '2000-01-03' ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Index Only Scan Backward using _hyper_8_171_chunk_metrics_date_time_idx on _hyper_8_171_chunk + Index Cond: ("time" < '01-03-2000'::date) + Filter: (time_bucket('@ 1 day'::interval, "time") < '01-03-2000'::date) +(3 rows) + +:PREFIX SELECT * FROM metrics_date WHERE time_bucket('1d',time) >= '2000-01-03' AND time_bucket('1d',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_date + Order: metrics_date."time" + -> Index Only Scan Backward using _hyper_8_171_chunk_metrics_date_time_idx on _hyper_8_171_chunk + Index Cond: (("time" >= '01-03-2000'::date) AND ("time" <= '01-11-2000'::date)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= '01-03-2000'::date) AND (time_bucket('@ 1 day'::interval, "time") <= '01-10-2000'::date)) + -> Index Only Scan Backward using _hyper_8_172_chunk_metrics_date_time_idx on _hyper_8_172_chunk + Index Cond: (("time" >= '01-03-2000'::date) AND ("time" <= '01-11-2000'::date)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= '01-03-2000'::date) AND (time_bucket('@ 1 day'::interval, "time") <= '01-10-2000'::date)) +(8 rows) + +\qecho time_bucket exclusion with timestamp +time_bucket exclusion with timestamp +:PREFIX SELECT * FROM metrics_timestamp WHERE time_bucket('1d',time) < '2000-01-03' ORDER BY time; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Index Only Scan Backward using _hyper_5_155_chunk_metrics_timestamp_time_idx on _hyper_5_155_chunk + Index Cond: ("time" < 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) +(3 
rows) + +:PREFIX SELECT * FROM metrics_timestamp WHERE time_bucket('1d',time) >= '2000-01-03' AND time_bucket('1d',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamp + Order: metrics_timestamp."time" + -> Index Only Scan Backward using _hyper_5_155_chunk_metrics_timestamp_time_idx on _hyper_5_155_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) AND ("time" <= 'Tue Jan 11 00:00:00 2000'::timestamp without time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) AND (time_bucket('@ 1 day'::interval, "time") <= 'Mon Jan 10 00:00:00 2000'::timestamp without time zone)) + -> Index Only Scan Backward using _hyper_5_156_chunk_metrics_timestamp_time_idx on _hyper_5_156_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) AND ("time" <= 'Tue Jan 11 00:00:00 2000'::timestamp without time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000'::timestamp without time zone) AND (time_bucket('@ 1 day'::interval, "time") <= 'Mon Jan 10 00:00:00 2000'::timestamp without time zone)) +(8 rows) + +\qecho time_bucket exclusion with timestamptz +time_bucket exclusion with timestamptz +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('6h',time) < '2000-01-03' ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: ("time" < 'Mon Jan 03 06:00:00 2000 PST'::timestamp with time zone) + Filter: (time_bucket('@ 6 hours'::interval, "time") < 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) +(3 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('6h',time) >= '2000-01-03' AND time_bucket('6h',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 10 06:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 6 hours'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 6 hours'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 10 06:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 6 hours'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 6 hours'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(8 
rows) + +\qecho time_bucket exclusion with timestamptz and day interval +time_bucket exclusion with timestamptz and day interval +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1d',time) < '2000-01-03' ORDER BY time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: ("time" < 'Tue Jan 04 00:00:00 2000 PST'::timestamp with time zone) + Filter: (time_bucket('@ 1 day'::interval, "time") < 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) +(3 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1d',time) >= '2000-01-03' AND time_bucket('1d',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Tue Jan 11 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 1 day'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Tue Jan 11 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 1 day'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(8 rows) + +:PREFIX SELECT time FROM metrics_timestamptz WHERE time_bucket('1d',time) >= '2000-01-03' AND time_bucket('7d',time) <= '2000-01-10' ORDER BY time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics_timestamptz + Order: metrics_timestamptz."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 17 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 7 days'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 17 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 7 days'::interval, "time") <= 'Mon Jan 10 00:00:00 
2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk + Index Cond: (("time" >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND ("time" <= 'Mon Jan 17 00:00:00 2000 PST'::timestamp with time zone)) + Filter: ((time_bucket('@ 1 day'::interval, "time") >= 'Mon Jan 03 00:00:00 2000 PST'::timestamp with time zone) AND (time_bucket('@ 7 days'::interval, "time") <= 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(11 rows) + +\qecho no transformation +no transformation +:PREFIX SELECT * FROM hyper WHERE time_bucket(10 + floor(random())::int, time) > 10 AND time_bucket(10 + floor(random())::int, time) < 100 AND time < 150 ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper."time" + -> Custom Scan (ChunkAppend) on hyper + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_3_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_4_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_5_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_6_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_7_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_8_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_9_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_10_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_11_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_12_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, 
"time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_13_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_14_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) + -> Seq Scan on _hyper_1_15_chunk + Filter: (("time" < 150) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") > 10) AND (time_bucket(((10 + (floor(random()))::integer))::bigint, "time") < 100)) +(34 rows) + +\qecho exclude chunks based on time column with partitioning function. This +exclude chunks based on time column with partitioning function. This +\qecho transparently applies the time partitioning function on the time +transparently applies the time partitioning function on the time +\qecho value to be able to exclude chunks (similar to a closed dimension). +value to be able to exclude chunks (similar to a closed dimension). +:PREFIX SELECT * FROM hyper_timefunc WHERE time < 4 ORDER BY value; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: _hyper_4_124_chunk.value + -> Append + -> Seq Scan on _hyper_4_124_chunk + Filter: ("time" < '4'::double precision) + -> Seq Scan on _hyper_4_125_chunk + Filter: ("time" < '4'::double precision) + -> Seq Scan on _hyper_4_126_chunk + Filter: ("time" < '4'::double precision) + -> Seq Scan on _hyper_4_127_chunk + Filter: ("time" < '4'::double precision) +(11 rows) + +\qecho excluding based on time expression is currently unoptimized +excluding based on time expression is currently unoptimized +:PREFIX SELECT * FROM hyper_timefunc WHERE unix_to_timestamp(time) < 'Wed Dec 31 16:00:04 1969 PST' ORDER BY value; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_4_124_chunk.value + -> Append + -> Seq Scan on _hyper_4_124_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_125_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_126_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_127_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_128_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_129_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_130_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_131_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_132_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_133_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_134_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on 
_hyper_4_135_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_136_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_137_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_138_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_139_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_140_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_141_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_142_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_143_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_144_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_145_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_146_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_147_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_148_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_149_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_150_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_151_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_152_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_153_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_4_154_chunk + Filter: (to_timestamp("time") < 'Wed Dec 31 16:00:04 1969 PST'::timestamp with time zone) +(65 rows) + +\qecho test qual propagation for joins +test qual propagation for joins +RESET constraint_exclusion; +\qecho nothing to propagate +nothing to propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1, metrics_timestamptz_2 m2 WHERE m1.time = m2.time ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index 
Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(18 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(18 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on 
_hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(18 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: m1."time" + -> Merge Right Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(20 rows) + +\qecho OR constraints should not propagate +OR constraints should not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' OR m1.time > '2001-01-01' ORDER BY m1.time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Filter: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) OR ("time" > 'Mon Jan 01 00:00:00 2001 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Filter: (("time" < 
'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) OR ("time" > 'Mon Jan 01 00:00:00 2001 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(17 rows) + +\qecho test single constraint +test single constraint +\qecho constraint should be on both scans +constraint should be on both scans +\qecho these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +:PREFIX SELECT m1.time FROM metrics_timestamptz m1, metrics_timestamptz_2 m2 WHERE m1.time = m2.time AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 
m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(17 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +\qecho test 2 constraints on single relation +test 2 constraints on single relation +\qecho these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL 
condition on the JOIN column +these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +:PREFIX SELECT m1.time FROM metrics_timestamptz m1, metrics_timestamptz_2 m2 WHERE m1.time = m2.time AND m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT 
m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Append + -> Index Only Scan using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + Index Cond: ("time" = m1."time") +(20 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +\qecho test 2 constraints with 
1 constraint on each relation +test 2 constraints with 1 constraint on each relation +\qecho these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +these will propagate even for LEFT/RIGHT JOIN because the constraints are not in the ON clause and therefore imply a NOT NULL condition on the JOIN column +:PREFIX SELECT m1.time FROM metrics_timestamptz m1, metrics_timestamptz_2 m2 WHERE m1.time = m2.time AND m1.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using 
_hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) AND ("time" > 'Sat Jan 01 00:00:00 
2000 PST'::timestamp with time zone)) +(15 rows) + +\qecho test constraints in ON clause of INNER JOIN +test constraints in ON clause of INNER JOIN +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(15 rows) + +\qecho test constraints in ON clause of LEFT JOIN +test constraints in ON clause of LEFT JOIN +\qecho must not propagate +must not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 LEFT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Merge Left Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 
PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(16 rows) + +\qecho test constraints in ON clause of RIGHT JOIN +test constraints in ON clause of RIGHT JOIN +\qecho must not propagate +must not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 RIGHT JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time AND m2.time > '2000-01-01' AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: m1_1."time" + -> Parallel Hash Left Join + Hash Cond: (m2_1."time" = m1_1."time") + Join Filter: ((m2_1."time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND (m2_1."time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Parallel Append + -> Parallel Seq Scan on _hyper_7_165_chunk m2_1 + -> Parallel Seq Scan on _hyper_7_166_chunk m2_2 + -> Parallel Seq Scan on _hyper_7_167_chunk m2_3 + -> Parallel Seq Scan on _hyper_7_168_chunk m2_4 + -> Parallel Seq Scan on _hyper_7_169_chunk m2_5 + -> Parallel Seq Scan on _hyper_7_170_chunk m2_6 + -> Parallel Hash + -> Parallel Append + -> Parallel Seq Scan on _hyper_6_160_chunk m1_1 + -> Parallel Seq Scan on _hyper_6_161_chunk m1_2 + -> Parallel Seq Scan on _hyper_6_162_chunk m1_3 + -> Parallel Seq Scan on _hyper_6_163_chunk m1_4 + -> Parallel Seq Scan on _hyper_6_164_chunk m1_5 +(21 rows) + +\qecho test equality condition not in ON clause +test equality condition not in ON clause +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON true WHERE m2.time = m1.time AND m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(15 rows) + +\qecho test constraints not joined on +test constraints not joined on +\qecho device_id constraint must not propagate +device_id constraint must not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON true WHERE m2.time = m1.time AND m2.time < '2000-01-10' AND m1.device_id = 1 ORDER BY m1.time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Nested Loop + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + 
-> Index Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Append + -> Index Only Scan using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" = m1."time") AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" = m1."time") AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) +(14 rows) + +\qecho test multiple join conditions +test multiple join conditions +\qecho device_id constraint should propagate +device_id constraint should propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON true WHERE m2.time = m1.time AND m1.device_id = m2.device_id AND m2.time < '2000-01-10' AND m1.device_id = 1 ORDER BY m1.time; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Nested Loop + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: ((device_id = 1) AND (device_id = 1)) + -> Index Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: ((device_id = 1) AND (device_id = 1)) + -> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" = m2."time") AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Filter: (device_id = 1) + -> Index Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" = m2."time") AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + Filter: (device_id = 1) +(16 rows) + +\qecho test join with 3 tables +test join with 3 tables +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time INNER JOIN metrics_timestamptz m3 ON m2.time=m3.time WHERE m1.time > '2000-01-01' AND m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + -> Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 
'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: (("time" > 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone) AND ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> Append + -> Index Only Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m3_1 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m3_2 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m3_3 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m3_4 + Index Cond: ("time" = m1."time") + -> Index Only Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m3_5 + Index Cond: ("time" = m1."time") +(27 rows) + +\qecho test non-Const constraints +test non-Const constraints +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10'::text::timestamptz ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + Chunks excluded during startup: 3 + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + Chunks excluded during startup: 4 + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < ('2000-01-10'::cstring)::timestamp with time zone) +(17 rows) + +\qecho test now() +test now() +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < now() ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + Chunks excluded during startup: 0 + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on 
_hyper_6_161_chunk m1_2 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + Index Cond: ("time" < now()) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + Chunks excluded during startup: 0 + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + Index Cond: ("time" < now()) + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + Index Cond: ("time" < now()) +(31 rows) + +\qecho test volatile function +test volatile function +\qecho should not propagate +should not propagate +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m1.time < clock_timestamp() ORDER BY m1.time; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + Chunks excluded during startup: 0 + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 + Filter: ("time" < clock_timestamp()) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + -> Index Only Scan Backward using 
_hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 +(24 rows) + +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN metrics_timestamptz_2 m2 ON m1.time = m2.time WHERE m2.time < clock_timestamp() ORDER BY m1.time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m2."time" = m1."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 m2 + Order: m2."time" + Chunks excluded during startup: 0 + -> Index Only Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk m2_1 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk m2_2 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_167_chunk_metrics_timestamptz_2_time_idx on _hyper_7_167_chunk m2_3 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk m2_4 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk m2_5 + Filter: ("time" < clock_timestamp()) + -> Index Only Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk m2_6 + Filter: ("time" < clock_timestamp()) + -> Materialize + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + -> Index Only Scan Backward using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk m1_3 + -> Index Only Scan Backward using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk m1_4 + -> Index Only Scan Backward using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk m1_5 +(25 rows) + +\qecho test JOINs with normal table +test JOINs with normal table +\qecho will not propagate because constraints are only added to hypertables +will not propagate because constraints are only added to hypertables +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN regular_timestamptz m2 ON m1.time = m2.time WHERE m1.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Sort + Sort Key: m2."time" + -> Seq Scan on regular_timestamptz m2 +(11 rows) + +\qecho test JOINs with normal table +test JOINs with normal table +:PREFIX SELECT m1.time FROM metrics_timestamptz m1 INNER JOIN regular_timestamptz m2 ON m1.time = m2.time WHERE m2.time < '2000-01-10' ORDER BY m1.time; + QUERY PLAN 
+----------------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (m1."time" = m2."time") + -> Custom Scan (ChunkAppend) on metrics_timestamptz m1 + Order: m1."time" + -> Index Only Scan Backward using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk m1_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk m1_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Sort + Sort Key: m2."time" + -> Seq Scan on regular_timestamptz m2 + Filter: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) +(12 rows) + +\qecho test quals are not pushed into OUTER JOIN +test quals are not pushed into OUTER JOIN +CREATE TABLE outer_join_1 (id int, name text,time timestamptz NOT NULL DEFAULT '2000-01-01'); +CREATE TABLE outer_join_2 (id int, name text,time timestamptz NOT NULL DEFAULT '2000-01-01'); +SELECT (SELECT table_name FROM create_hypertable(tbl, 'time')) FROM (VALUES ('outer_join_1'),('outer_join_2')) v(tbl); + table_name +-------------- + outer_join_1 + outer_join_2 +(2 rows) + +INSERT INTO outer_join_1 VALUES(1,'a'), (2,'b'); +INSERT INTO outer_join_2 VALUES(1,'a'); +:PREFIX SELECT one.id, two.name FROM outer_join_1 one LEFT OUTER JOIN outer_join_2 two ON one.id=two.id WHERE one.id=2; + QUERY PLAN +------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on _hyper_9_176_chunk one + Filter: (id = 2) + -> Materialize + -> Seq Scan on _hyper_10_177_chunk two + Filter: (id = 2) +(6 rows) + +:PREFIX SELECT one.id, two.name FROM outer_join_2 two RIGHT OUTER JOIN outer_join_1 one ON one.id=two.id WHERE one.id=2; + QUERY PLAN +------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on _hyper_9_176_chunk one + Filter: (id = 2) + -> Materialize + -> Seq Scan on _hyper_10_177_chunk two + Filter: (id = 2) +(6 rows) + +DROP TABLE outer_join_1; +DROP TABLE outer_join_2; +-- test UNION between regular table and hypertable +SELECT time FROM regular_timestamptz UNION SELECT time FROM metrics_timestamptz ORDER BY 1; + time +------------------------------ + Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sun Jan 30 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST +(32 rows) + +-- test UNION ALL between regular table and hypertable +SELECT time FROM regular_timestamptz UNION ALL SELECT time FROM metrics_timestamptz ORDER BY 1; + time +------------------------------ + Sat Jan 01 00:00:00 2000 PST + Sat Jan 01 
00:00:00 2000 PST + Sat Jan 01 00:00:00 2000 PST + Sat Jan 01 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST + Sun Jan 02 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Mon Jan 03 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Tue Jan 04 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Wed Jan 05 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Thu Jan 06 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Fri Jan 07 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sat Jan 08 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Sun Jan 09 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Mon Jan 10 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Tue Jan 11 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Wed Jan 12 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Thu Jan 13 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Fri Jan 14 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sat Jan 15 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Sun Jan 16 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Mon Jan 17 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Tue Jan 18 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Wed Jan 19 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Thu Jan 20 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Fri Jan 21 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sat Jan 22 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Sun Jan 23 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Mon Jan 24 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Tue Jan 25 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Wed Jan 26 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Thu Jan 27 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Fri Jan 28 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + Sat Jan 29 00:00:00 2000 PST + 
Sun Jan 30 00:00:00 2000 PST + Sun Jan 30 00:00:00 2000 PST + Sun Jan 30 00:00:00 2000 PST + Sun Jan 30 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Mon Jan 31 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST + Tue Feb 01 00:00:00 2000 PST +(128 rows) + +-- test nested join qual propagation +:PREFIX +SELECT * FROM ( +SELECT o1_m1.time FROM metrics_timestamptz o1_m1 INNER JOIN metrics_timestamptz_2 o1_m2 ON true WHERE o1_m2.time = o1_m1.time AND o1_m1.device_id = o1_m2.device_id AND o1_m2.time < '2000-01-10' AND o1_m1.device_id = 1 +) o1 FULL OUTER JOIN ( +SELECT o2_m1.time FROM metrics_timestamptz o2_m1 FULL OUTER JOIN metrics_timestamptz_2 o2_m2 ON true WHERE o2_m2.time = o2_m1.time AND o2_m1.device_id = o2_m2.device_id AND o2_m2.time > '2000-01-20' AND o2_m1.device_id = 2 +) o2 ON o1.time = o2.time ORDER BY 1,2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: o1_m1_1."time", o2_m1_1."time" + -> Merge Full Join + Merge Cond: (o2_m1_1."time" = o1_m1_1."time") + -> Nested Loop + -> Merge Append + Sort Key: o2_m2_1."time" + -> Index Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk o2_m2_1 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk o2_m2_2 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk o2_m2_3 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk o2_m1_1 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk o2_m1_2 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk o2_m1_3 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk o2_m1_4 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk o2_m1_5 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Materialize + -> Nested Loop + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 o1_m2 + Order: o1_m2."time" + -> Index Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk o1_m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk o1_m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk o1_m1_1 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using 
_hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk o1_m1_2 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk o1_m1_3 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk o1_m1_4 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk o1_m1_5 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) +(58 rows) + +:PREFIX +SELECT * FROM ( +SELECT o1_m1.time FROM metrics_timestamptz o1_m1 INNER JOIN metrics_timestamptz_2 o1_m2 ON o1_m2.time = o1_m1.time AND o1_m1.device_id = o1_m2.device_id WHERE o1_m2.time < '2000-01-10' AND o1_m1.device_id = 1 +) o1 FULL OUTER JOIN ( +SELECT o2_m1.time FROM metrics_timestamptz o2_m1 FULL OUTER JOIN metrics_timestamptz_2 o2_m2 ON o2_m2.time = o2_m1.time AND o2_m1.device_id = o2_m2.device_id WHERE o2_m2.time > '2000-01-20' AND o2_m1.device_id = 2 +) o2 ON o1.time = o2.time ORDER BY 1,2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: o1_m1_1."time", o2_m1_1."time" + -> Merge Full Join + Merge Cond: (o2_m1_1."time" = o1_m1_1."time") + -> Nested Loop + -> Merge Append + Sort Key: o2_m2_1."time" + -> Index Scan Backward using _hyper_7_168_chunk_metrics_timestamptz_2_time_idx on _hyper_7_168_chunk o2_m2_1 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_7_169_chunk_metrics_timestamptz_2_time_idx on _hyper_7_169_chunk o2_m2_2 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Index Scan Backward using _hyper_7_170_chunk_metrics_timestamptz_2_time_idx on _hyper_7_170_chunk o2_m2_3 + Index Cond: ("time" > 'Thu Jan 20 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 2) + -> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk o2_m1_1 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk o2_m1_2 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk o2_m1_3 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk o2_m1_4 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Index Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk o2_m1_5 + Index Cond: ("time" = o2_m2_1."time") + Filter: (device_id = 2) + -> Materialize + -> Nested Loop + -> Custom Scan (ChunkAppend) on metrics_timestamptz_2 o1_m2 + Order: o1_m2."time" + -> Index Scan Backward using _hyper_7_165_chunk_metrics_timestamptz_2_time_idx on _hyper_7_165_chunk o1_m2_1 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + -> Index Scan Backward using _hyper_7_166_chunk_metrics_timestamptz_2_time_idx on _hyper_7_166_chunk o1_m2_2 + Index Cond: ("time" < 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + Filter: (device_id = 1) + 
-> Append + -> Index Scan using _hyper_6_160_chunk_metrics_timestamptz_time_idx on _hyper_6_160_chunk o1_m1_1 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_161_chunk_metrics_timestamptz_time_idx on _hyper_6_161_chunk o1_m1_2 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_162_chunk_metrics_timestamptz_time_idx on _hyper_6_162_chunk o1_m1_3 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_163_chunk_metrics_timestamptz_time_idx on _hyper_6_163_chunk o1_m1_4 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) + -> Index Scan using _hyper_6_164_chunk_metrics_timestamptz_time_idx on _hyper_6_164_chunk o1_m1_5 + Index Cond: ("time" = o1_m2."time") + Filter: (device_id = 1) +(58 rows) + +\ir include/plan_expand_hypertable_chunks_in_query.sql +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +--we want to see how our logic excludes chunks +--and not how much work constraint_exclusion does +SET constraint_exclusion = 'off'; +:PREFIX SELECT * FROM hyper ORDER BY value; + QUERY PLAN +-------------------------------------------- + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + -> Seq Scan on _hyper_1_2_chunk + -> Seq Scan on _hyper_1_3_chunk + -> Seq Scan on _hyper_1_4_chunk + -> Seq Scan on _hyper_1_5_chunk + -> Seq Scan on _hyper_1_6_chunk + -> Seq Scan on _hyper_1_7_chunk + -> Seq Scan on _hyper_1_8_chunk + -> Seq Scan on _hyper_1_9_chunk + -> Seq Scan on _hyper_1_10_chunk + -> Seq Scan on _hyper_1_11_chunk + -> Seq Scan on _hyper_1_12_chunk + -> Seq Scan on _hyper_1_13_chunk + -> Seq Scan on _hyper_1_14_chunk + -> Seq Scan on _hyper_1_15_chunk + -> Seq Scan on _hyper_1_16_chunk + -> Seq Scan on _hyper_1_17_chunk + -> Seq Scan on _hyper_1_18_chunk + -> Seq Scan on _hyper_1_19_chunk + -> Seq Scan on _hyper_1_20_chunk + -> Seq Scan on _hyper_1_21_chunk + -> Seq Scan on _hyper_1_22_chunk + -> Seq Scan on _hyper_1_23_chunk + -> Seq Scan on _hyper_1_24_chunk + -> Seq Scan on _hyper_1_25_chunk + -> Seq Scan on _hyper_1_26_chunk + -> Seq Scan on _hyper_1_27_chunk + -> Seq Scan on _hyper_1_28_chunk + -> Seq Scan on _hyper_1_29_chunk + -> Seq Scan on _hyper_1_30_chunk + -> Seq Scan on _hyper_1_31_chunk + -> Seq Scan on _hyper_1_32_chunk + -> Seq Scan on _hyper_1_33_chunk + -> Seq Scan on _hyper_1_34_chunk + -> Seq Scan on _hyper_1_35_chunk + -> Seq Scan on _hyper_1_36_chunk + -> Seq Scan on _hyper_1_37_chunk + -> Seq Scan on _hyper_1_38_chunk + -> Seq Scan on _hyper_1_39_chunk + -> Seq Scan on _hyper_1_40_chunk + -> Seq Scan on _hyper_1_41_chunk + -> Seq Scan on _hyper_1_42_chunk + -> Seq Scan on _hyper_1_43_chunk + -> Seq Scan on _hyper_1_44_chunk + -> Seq Scan on _hyper_1_45_chunk + -> Seq Scan on _hyper_1_46_chunk + -> Seq Scan on _hyper_1_47_chunk + -> Seq Scan on _hyper_1_48_chunk + -> Seq Scan on _hyper_1_49_chunk + -> Seq Scan on _hyper_1_50_chunk + -> Seq Scan on _hyper_1_51_chunk + -> Seq Scan on _hyper_1_52_chunk + -> Seq Scan on _hyper_1_53_chunk + -> Seq Scan on _hyper_1_54_chunk + -> Seq Scan on _hyper_1_55_chunk + -> Seq Scan on _hyper_1_56_chunk + -> Seq Scan on _hyper_1_57_chunk + -> Seq Scan on _hyper_1_58_chunk + -> Seq Scan on _hyper_1_59_chunk + -> Seq Scan on _hyper_1_60_chunk + -> Seq Scan on _hyper_1_61_chunk + -> Seq Scan on _hyper_1_62_chunk + -> Seq Scan on 
_hyper_1_63_chunk + -> Seq Scan on _hyper_1_64_chunk + -> Seq Scan on _hyper_1_65_chunk + -> Seq Scan on _hyper_1_66_chunk + -> Seq Scan on _hyper_1_67_chunk + -> Seq Scan on _hyper_1_68_chunk + -> Seq Scan on _hyper_1_69_chunk + -> Seq Scan on _hyper_1_70_chunk + -> Seq Scan on _hyper_1_71_chunk + -> Seq Scan on _hyper_1_72_chunk + -> Seq Scan on _hyper_1_73_chunk + -> Seq Scan on _hyper_1_74_chunk + -> Seq Scan on _hyper_1_75_chunk + -> Seq Scan on _hyper_1_76_chunk + -> Seq Scan on _hyper_1_77_chunk + -> Seq Scan on _hyper_1_78_chunk + -> Seq Scan on _hyper_1_79_chunk + -> Seq Scan on _hyper_1_80_chunk + -> Seq Scan on _hyper_1_81_chunk + -> Seq Scan on _hyper_1_82_chunk + -> Seq Scan on _hyper_1_83_chunk + -> Seq Scan on _hyper_1_84_chunk + -> Seq Scan on _hyper_1_85_chunk + -> Seq Scan on _hyper_1_86_chunk + -> Seq Scan on _hyper_1_87_chunk + -> Seq Scan on _hyper_1_88_chunk + -> Seq Scan on _hyper_1_89_chunk + -> Seq Scan on _hyper_1_90_chunk + -> Seq Scan on _hyper_1_91_chunk + -> Seq Scan on _hyper_1_92_chunk + -> Seq Scan on _hyper_1_93_chunk + -> Seq Scan on _hyper_1_94_chunk + -> Seq Scan on _hyper_1_95_chunk + -> Seq Scan on _hyper_1_96_chunk + -> Seq Scan on _hyper_1_97_chunk + -> Seq Scan on _hyper_1_98_chunk + -> Seq Scan on _hyper_1_99_chunk + -> Seq Scan on _hyper_1_100_chunk + -> Seq Scan on _hyper_1_101_chunk + -> Seq Scan on _hyper_1_102_chunk +(105 rows) + +-- explicit chunk exclusion +:PREFIX SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[1,2]) ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + -> Seq Scan on _hyper_1_2_chunk +(5 rows) + +:PREFIX SELECT * FROM (SELECT * FROM hyper h WHERE _timescaledb_functions.chunks_in(h, ARRAY[1,2,3])) T ORDER BY value; + QUERY PLAN +---------------------------------------------- + Sort + Sort Key: h_1.value + -> Append + -> Seq Scan on _hyper_1_1_chunk h_1 + -> Seq Scan on _hyper_1_2_chunk h_2 + -> Seq Scan on _hyper_1_3_chunk h_3 +(6 rows) + +:PREFIX SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[1,2,3]) AND time < 10 ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: _hyper_1_1_chunk.value + -> Append + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_1_2_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_1_3_chunk + Filter: ("time" < 10) +(9 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE device_id = 'dev1' AND time < to_timestamp(10) AND _timescaledb_functions.chunks_in(hyper_ts, ARRAY[116]) ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: _hyper_3_116_chunk.value + -> Seq Scan on _hyper_3_116_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(4 rows) + +:PREFIX SELECT * FROM hyper_ts h JOIN tag on (h.tag_id = tag.id ) WHERE _timescaledb_functions.chunks_in(h, ARRAY[116]) AND time < to_timestamp(10) AND device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: h.value + -> Merge Join + Merge Cond: (tag.id = h.tag_id) + -> Index Scan using tag_pkey on tag + -> Sort + Sort Key: h.tag_id + -> Seq Scan on _hyper_3_116_chunk h + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 
PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(9 rows) + +:PREFIX SELECT * FROM hyper_w_space h1 JOIN hyper_ts h2 ON h1.device_id=h2.device_id WHERE _timescaledb_functions.chunks_in(h1, ARRAY[104,105]) AND _timescaledb_functions.chunks_in(h2, ARRAY[116,117]) ORDER BY h1.value; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: h1_1.value + -> Hash Join + Hash Cond: (h2_1.device_id = h1_1.device_id) + -> Append + -> Seq Scan on _hyper_3_116_chunk h2_1 + -> Seq Scan on _hyper_3_117_chunk h2_2 + -> Hash + -> Append + -> Seq Scan on _hyper_2_104_chunk h1_1 + -> Seq Scan on _hyper_2_105_chunk h1_2 +(11 rows) + +:PREFIX SELECT * FROM hyper_w_space h1 JOIN hyper_ts h2 ON h1.device_id=h2.device_id AND _timescaledb_functions.chunks_in(h2, ARRAY[116,117]) WHERE _timescaledb_functions.chunks_in(h1, ARRAY[104,105]) ORDER BY h1.value; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: h1_1.value + -> Hash Join + Hash Cond: (h2_1.device_id = h1_1.device_id) + -> Append + -> Seq Scan on _hyper_3_116_chunk h2_1 + -> Seq Scan on _hyper_3_117_chunk h2_2 + -> Hash + -> Append + -> Seq Scan on _hyper_2_104_chunk h1_1 + -> Seq Scan on _hyper_2_105_chunk h1_2 +(11 rows) + +:PREFIX SELECT * FROM hyper h1, hyper h2 WHERE _timescaledb_functions.chunks_in(h1, ARRAY[1,2]) AND _timescaledb_functions.chunks_in(h2, ARRAY[2,3]); + QUERY PLAN +----------------------------------------------------- + Nested Loop + -> Append + -> Seq Scan on _hyper_1_1_chunk h1_1 + -> Seq Scan on _hyper_1_2_chunk h1_2 + -> Materialize + -> Append + -> Seq Scan on _hyper_1_2_chunk h2_1 + -> Seq Scan on _hyper_1_3_chunk h2_2 +(8 rows) + +SET enable_seqscan=false; +-- Should perform index-only scan. Since we pass whole row into the function it might block planner from using index-only scan. +-- But since we'll remove the function from the query tree before planner decision it shouldn't affect index-only decision. 
+:PREFIX SELECT time FROM hyper WHERE time=0 AND _timescaledb_functions.chunks_in(hyper, ARRAY[1]); + QUERY PLAN +--------------------------------------------------------------------------- + Index Only Scan using _hyper_1_1_chunk_hyper_time_idx on _hyper_1_1_chunk + Index Cond: ("time" = 0) +(2 rows) + +:PREFIX SELECT first(value, time) FROM hyper h WHERE _timescaledb_functions.chunks_in(h, ARRAY[1]); + QUERY PLAN +----------------------------------------------------------------------------------------------- + Result + InitPlan 1 (returns $0) + -> Limit + -> Index Scan Backward using _hyper_1_1_chunk_hyper_time_idx on _hyper_1_1_chunk h + Index Cond: ("time" IS NOT NULL) +(5 rows) + +\set ON_ERROR_STOP 0 +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[1,2]) AND _timescaledb_functions.chunks_in(hyper, ARRAY[2,3]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:26: ERROR: illegal invocation of chunks_in function +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(2, ARRAY[1]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:27: ERROR: function _timescaledb_functions.chunks_in(integer, integer[]) does not exist at character 27 +SELECT * FROM hyper WHERE time < 10 OR _timescaledb_functions.chunks_in(hyper, ARRAY[1,2]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:28: ERROR: illegal invocation of chunks_in function +SELECT _timescaledb_functions.chunks_in(hyper, ARRAY[1,2]) FROM hyper; +psql:include/plan_expand_hypertable_chunks_in_query.sql:29: ERROR: illegal invocation of chunks_in function +-- non existing chunk id +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[123456789]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:31: ERROR: chunk id 123456789 not found +-- chunk that belongs to another hypertable +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(hyper, ARRAY[104]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:33: ERROR: chunk id 104 does not belong to hypertable "hyper" +-- passing wrong row ref +SELECT * FROM hyper WHERE _timescaledb_functions.chunks_in(ROW(1,2), ARRAY[104]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:35: ERROR: first parameter for chunks_in function needs to be record +-- passing func as chunk id +SELECT * FROM hyper h WHERE _timescaledb_functions.chunks_in(h, array_append(ARRAY[1],current_setting('server_version_num')::int)); +psql:include/plan_expand_hypertable_chunks_in_query.sql:37: ERROR: second argument to chunk_in should contain only integer consts +-- NULL chunk IDs not allowed in chunk array +SELECT * FROM hyper h WHERE _timescaledb_functions.chunks_in(h, ARRAY[NULL::int]); +psql:include/plan_expand_hypertable_chunks_in_query.sql:39: ERROR: chunk id can't be NULL +\set ON_ERROR_STOP 1 +-- chunks_in is STRICT function and for NULL arguments a null result is returned +SELECT * FROM hyper h WHERE _timescaledb_functions.chunks_in(h, NULL); + value | time +-------+------ +(0 rows) + +\set ECHO errors +RESET timescaledb.enable_optimizations; +CREATE TABLE t(time timestamptz NOT NULL); +SELECT table_name FROM create_hypertable('t','time'); + table_name +------------ + t +(1 row) + +INSERT INTO t VALUES ('2000-01-01'), ('2010-01-01'), ('2020-01-01'); +EXPLAIN (costs off) SELECT * FROM t t1 INNER JOIN t t2 ON t1.time = t2.time WHERE t1.time < timestamptz '2010-01-01'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (t1_1."time" = 
t2_1."time") + -> Merge Append + Sort Key: t1_1."time" + -> Index Only Scan Backward using _hyper_15_182_chunk_t_time_idx on _hyper_15_182_chunk t1_1 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_15_183_chunk_t_time_idx on _hyper_15_183_chunk t1_2 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Materialize + -> Merge Append + Sort Key: t2_1."time" + -> Index Only Scan Backward using _hyper_15_182_chunk_t_time_idx on _hyper_15_182_chunk t2_1 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_15_183_chunk_t_time_idx on _hyper_15_183_chunk t2_2 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) +(15 rows) + +SET timescaledb.enable_qual_propagation TO false; +EXPLAIN (costs off) SELECT * FROM t t1 INNER JOIN t t2 ON t1.time = t2.time WHERE t1.time < timestamptz '2010-01-01'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Merge Join + Merge Cond: (t1_1."time" = t2_1."time") + -> Merge Append + Sort Key: t1_1."time" + -> Index Only Scan Backward using _hyper_15_182_chunk_t_time_idx on _hyper_15_182_chunk t1_1 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Index Only Scan Backward using _hyper_15_183_chunk_t_time_idx on _hyper_15_183_chunk t1_2 + Index Cond: ("time" < 'Fri Jan 01 00:00:00 2010 PST'::timestamp with time zone) + -> Materialize + -> Merge Append + Sort Key: t2_1."time" + -> Index Only Scan Backward using _hyper_15_182_chunk_t_time_idx on _hyper_15_182_chunk t2_1 + -> Index Only Scan Backward using _hyper_15_183_chunk_t_time_idx on _hyper_15_183_chunk t2_2 + -> Index Only Scan Backward using _hyper_15_184_chunk_t_time_idx on _hyper_15_184_chunk t2_3 +(14 rows) + +RESET timescaledb.enable_qual_propagation; +CREATE TABLE test (a int, time timestamptz NOT NULL); +SELECT table_name FROM create_hypertable('public.test', 'time'); + table_name +------------ + test +(1 row) + +INSERT INTO test SELECT i, '2020-04-01'::date-10-i from generate_series(1,20) i; +CREATE OR REPLACE FUNCTION test_f(_ts timestamptz) +RETURNS SETOF test LANGUAGE SQL STABLE PARALLEL SAFE +AS $f$ + SELECT DISTINCT ON (a) * FROM test WHERE time >= _ts ORDER BY a, time DESC +$f$; +EXPLAIN (costs off) SELECT * FROM test_f(now()); + QUERY PLAN +------------------------------------------------- + Unique + -> Sort + Sort Key: test.a, test."time" DESC + -> Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 4 +(5 rows) + +EXPLAIN (costs off) SELECT * FROM test_f(now()); + QUERY PLAN +------------------------------------------------- + Unique + -> Sort + Sort Key: test.a, test."time" DESC + -> Custom Scan (ChunkAppend) on test + Chunks excluded during startup: 4 +(5 rows) + +CREATE TABLE t1 (a int, b int NOT NULL); +SELECT create_hypertable('t1', 'b', chunk_time_interval=>10); + create_hypertable +------------------- + (17,public,t1,t) +(1 row) + +CREATE TABLE t2 (a int, b int NOT NULL); +SELECT create_hypertable('t2', 'b', chunk_time_interval=>10); + create_hypertable +------------------- + (18,public,t2,t) +(1 row) + +CREATE OR REPLACE FUNCTION f_t1(_a int, _b int) + RETURNS SETOF t1 + LANGUAGE SQL + STABLE PARALLEL SAFE +AS $function$ + SELECT DISTINCT ON (a) * FROM t1 WHERE a = _a and b = _b ORDER BY a, b DESC +$function$ +; +CREATE OR REPLACE FUNCTION f_t2(_a int, _b int) 
RETURNS SETOF t2 LANGUAGE sql STABLE PARALLEL SAFE +AS $function$ + SELECT DISTINCT ON (j.a) j.* + FROM + f_t1(_a, _b) sc, + t2 j + WHERE + j.b = _b AND + j.a = _a + ORDER BY j.a, j.b DESC +$function$ +; +CREATE OR REPLACE FUNCTION f_t1_2(_b int) RETURNS SETOF t1 LANGUAGE SQL STABLE PARALLEL SAFE +AS $function$ + SELECT DISTINCT ON (j.a) jt.* FROM t1 j, f_t1(j.a, _b) jt +$function$; +EXPLAIN (costs off) SELECT * FROM f_t1_2(10); + QUERY PLAN +--------------------------------------------------------------- + Subquery Scan on f_t1_2 + -> Unique + -> Sort + Sort Key: j.a + -> Nested Loop + -> Seq Scan on t1 j + -> Limit + -> Index Scan using t1_b_idx on t1 + Index Cond: (b = 10) + Filter: (a = j.a) +(10 rows) + +EXPLAIN (costs off) SELECT * FROM f_t1_2(10) sc, f_t2(sc.a, 10); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Unique + -> Sort + Sort Key: j.a + -> Nested Loop + -> Seq Scan on t1 j + -> Limit + -> Index Scan using t1_b_idx on t1 + Index Cond: (b = 10) + Filter: (a = j.a) + -> Limit + -> Nested Loop + -> Limit + -> Index Scan using t1_b_idx on t1 t1_1 + Index Cond: (b = 10) + Filter: (a = t1.a) + -> Index Scan using t2_b_idx on t2 j_1 + Index Cond: (b = 10) + Filter: (a = t1.a) +(19 rows) + +--TEST END-- diff --git a/test/expected/plan_ordered_append.out b/test/expected/plan_ordered_append-13.out similarity index 100% rename from test/expected/plan_ordered_append.out rename to test/expected/plan_ordered_append-13.out diff --git a/test/expected/plan_ordered_append-14.out b/test/expected/plan_ordered_append-14.out new file mode 100644 index 00000000000..24bf1c01da4 --- /dev/null +++ b/test/expected/plan_ordered_append-14.out @@ -0,0 +1,629 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- we run these with analyze to confirm that nodes that are not +-- needed to fulfill the limit are not executed +-- unfortunately this doesn't work on PostgreSQL 9.6 which lacks +-- the ability to turn off analyze timing summary so we run +-- them without ANALYZE on PostgreSQL 9.6, but since LATERAL plans +-- are different across versions we need version specific output +-- here anyway. +\set TEST_BASE_NAME plan_ordered_append +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') as "TEST_LOAD_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME", + format('%s/results/%s_results_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_OPTIMIZED", + format('%s/results/%s_results_unoptimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNOPTIMIZED" +\gset +SELECT format('\! diff -u --label "Unoptimized result" --label "Optimized result" %s %s', :'TEST_RESULTS_UNOPTIMIZED', :'TEST_RESULTS_OPTIMIZED') as "DIFF_CMD" +\gset +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\set PREFIX_NO_ANALYZE 'EXPLAIN (costs off)' +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- create a now() function for repeatable testing that always returns +-- the same timestamp. 
It needs to be marked STABLE +CREATE OR REPLACE FUNCTION now_s() +RETURNS timestamptz LANGUAGE PLPGSQL STABLE AS +$BODY$ +BEGIN + RETURN '2000-01-08T0:00:00+0'::timestamptz; +END; +$BODY$; +CREATE TABLE devices(device_id INT PRIMARY KEY, name TEXT); +INSERT INTO devices VALUES +(1,'Device 1'), +(2,'Device 2'), +(3,'Device 3'); +-- create a second table where we create chunks in reverse order +CREATE TABLE ordered_append_reverse(time timestamptz NOT NULL, device_id INT, value float); +SELECT create_hypertable('ordered_append_reverse','time'); + create_hypertable +------------------------------------- + (1,public,ordered_append_reverse,t) +(1 row) + +INSERT INTO ordered_append_reverse SELECT generate_series('2000-01-18'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 0.5; +-- table where dimension column is last column +CREATE TABLE IF NOT EXISTS dimension_last( + id INT8 NOT NULL, + device_id INT NOT NULL, + name TEXT NOT NULL, + time timestamptz NOT NULL +); +SELECT create_hypertable('dimension_last', 'time', chunk_time_interval => interval '1day', if_not_exists => True); + create_hypertable +----------------------------- + (2,public,dimension_last,t) +(1 row) + +-- table with only dimension column +CREATE TABLE IF NOT EXISTS dimension_only( + time timestamptz NOT NULL +); +SELECT create_hypertable('dimension_only', 'time', chunk_time_interval => interval '1day', if_not_exists => True); + create_hypertable +----------------------------- + (3,public,dimension_only,t) +(1 row) + +INSERT INTO dimension_last SELECT 1,1,'Device 1',generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-04 23:59:00+0'::timestamptz,'1m'::interval); +INSERT INTO dimension_only VALUES +('2000-01-01'), +('2000-01-03'), +('2000-01-05'), +('2000-01-07'); +ANALYZE devices; +ANALYZE ordered_append_reverse; +ANALYZE dimension_last; +ANALYZE dimension_only; +-- create hypertable with indexes not on all chunks +CREATE TABLE ht_missing_indexes(time timestamptz NOT NULL, device_id int, value float); +SELECT create_hypertable('ht_missing_indexes','time'); + create_hypertable +--------------------------------- + (4,public,ht_missing_indexes,t) +(1 row) + +INSERT INTO ht_missing_indexes SELECT generate_series('2000-01-01'::timestamptz,'2000-01-18'::timestamptz,'1m'::interval), 1, 0.5; +INSERT INTO ht_missing_indexes SELECT generate_series('2000-01-01'::timestamptz,'2000-01-18'::timestamptz,'1m'::interval), 2, 1.5; +INSERT INTO ht_missing_indexes SELECT generate_series('2000-01-01'::timestamptz,'2000-01-18'::timestamptz,'1m'::interval), 3, 2.5; +-- drop index from 2nd chunk of ht_missing_indexes +SELECT format('%I.%I',i.schemaname,i.indexname) AS "INDEX_NAME" +FROM _timescaledb_catalog.chunk c +INNER JOIN _timescaledb_catalog.hypertable ht ON c.hypertable_id = ht.id +INNER JOIN pg_indexes i ON i.schemaname = c.schema_name AND i.tablename=c.table_name +WHERE ht.table_name = 'ht_missing_indexes' +ORDER BY c.id LIMIT 1 OFFSET 1 \gset +DROP INDEX :INDEX_NAME; +ANALYZE ht_missing_indexes; +-- create hypertable with with dropped columns +CREATE TABLE ht_dropped_columns(c1 int, c2 int, c3 int, c4 int, c5 int, time timestamptz NOT NULL, device_id int, value float); +SELECT create_hypertable('ht_dropped_columns','time'); + create_hypertable +--------------------------------- + (5,public,ht_dropped_columns,t) +(1 row) + +ALTER TABLE ht_dropped_columns DROP COLUMN c1; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-01'::timestamptz,'2000-01-02'::timestamptz,'1m'::interval), 1, 0.5; 
+ALTER TABLE ht_dropped_columns DROP COLUMN c2; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-08'::timestamptz,'2000-01-09'::timestamptz,'1m'::interval), 1, 0.5; +ALTER TABLE ht_dropped_columns DROP COLUMN c3; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-15'::timestamptz,'2000-01-16'::timestamptz,'1m'::interval), 1, 0.5; +ALTER TABLE ht_dropped_columns DROP COLUMN c4; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-22'::timestamptz,'2000-01-23'::timestamptz,'1m'::interval), 1, 0.5; +ALTER TABLE ht_dropped_columns DROP COLUMN c5; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-29'::timestamptz,'2000-01-30'::timestamptz,'1m'::interval), 1, 0.5; +ANALYZE ht_dropped_columns; +CREATE TABLE space2(time timestamptz NOT NULL, device_id int NOT NULL, tag_id int NOT NULL, value float); +SELECT create_hypertable('space2','time','device_id',number_partitions:=3); + create_hypertable +--------------------- + (6,public,space2,t) +(1 row) + +SELECT add_dimension('space2','tag_id',number_partitions:=3); + add_dimension +---------------------------- + (8,public,space2,tag_id,t) +(1 row) + +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 1, 1.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 1, 2.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 3, 1, 3.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 2, 1.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 2, 2.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 3, 2, 3.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 3, 1.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 3, 2.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 3, 3, 3.5; +ANALYZE space2; +CREATE TABLE space3(time timestamptz NOT NULL, x int NOT NULL, y int NOT NULL, z int NOT NULL, value float); +SELECT create_hypertable('space3','time','x',number_partitions:=2); + create_hypertable +--------------------- + (7,public,space3,t) +(1 row) + +SELECT add_dimension('space3','y',number_partitions:=2); + add_dimension +------------------------ + (11,public,space3,y,t) +(1 row) + +SELECT add_dimension('space3','z',number_partitions:=2); + add_dimension +------------------------ + (12,public,space3,z,t) +(1 row) + +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 1, 1, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 1, 2, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 2, 1, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 2, 2, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 1, 1, 1.5; +INSERT INTO 
space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 1, 2, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 2, 1, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 2, 2, 1.5; +ANALYZE space3; +CREATE TABLE sortopt_test(time timestamptz NOT NULL, device TEXT); +SELECT create_hypertable('sortopt_test','time',create_default_indexes:=false); + create_hypertable +--------------------------- + (8,public,sortopt_test,t) +(1 row) + +-- since alpine does not support locales we cant test collations in our ci +-- CREATE COLLATION IF NOT EXISTS en_US(LOCALE='en_US.utf8'); +-- CREATE INDEX time_device_utf8 ON sortopt_test(time, device COLLATE "en_US"); +CREATE INDEX time_device_nullsfirst ON sortopt_test(time, device NULLS FIRST); +CREATE INDEX time_device_nullslast ON sortopt_test(time, device DESC NULLS LAST); +INSERT INTO sortopt_test SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 'Device 1'; +ANALYZE sortopt_test; +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- print chunks ordered by time to ensure ordering we want +SELECT + ht.table_name AS hypertable, + c.table_name AS chunk, + ds.range_start +FROM + _timescaledb_catalog.chunk c + INNER JOIN LATERAL(SELECT * FROM _timescaledb_catalog.chunk_constraint cc WHERE c.id = cc.chunk_id ORDER BY cc.dimension_slice_id LIMIT 1) cc ON true + INNER JOIN _timescaledb_catalog.dimension_slice ds ON ds.id=cc.dimension_slice_id + INNER JOIN _timescaledb_catalog.dimension d ON ds.dimension_id = d.id + INNER JOIN _timescaledb_catalog.hypertable ht ON d.hypertable_id = ht.id +ORDER BY ht.table_name, range_start, chunk; + hypertable | chunk | range_start +------------------------+-------------------+---------------------- + dimension_last | _hyper_2_4_chunk | 946684800000000 + dimension_last | _hyper_2_5_chunk | 946771200000000 + dimension_last | _hyper_2_6_chunk | 946857600000000 + dimension_last | _hyper_2_7_chunk | 946944000000000 + dimension_only | _hyper_3_8_chunk | 946684800000000 + dimension_only | _hyper_3_9_chunk | 946857600000000 + dimension_only | _hyper_3_10_chunk | 947030400000000 + dimension_only | _hyper_3_11_chunk | 947203200000000 + ht_dropped_columns | _hyper_5_15_chunk | 946512000000000 + ht_dropped_columns | _hyper_5_16_chunk | 947116800000000 + ht_dropped_columns | _hyper_5_17_chunk | 947721600000000 + ht_dropped_columns | _hyper_5_18_chunk | 948326400000000 + ht_dropped_columns | _hyper_5_19_chunk | 948931200000000 + ht_missing_indexes | _hyper_4_12_chunk | 946512000000000 + ht_missing_indexes | _hyper_4_13_chunk | 947116800000000 + ht_missing_indexes | _hyper_4_14_chunk | 947721600000000 + ordered_append_reverse | _hyper_1_3_chunk | 946512000000000 + ordered_append_reverse | _hyper_1_2_chunk | 947116800000000 + ordered_append_reverse | _hyper_1_1_chunk | 947721600000000 + sortopt_test | _hyper_8_55_chunk | 946512000000000 + sortopt_test | _hyper_8_54_chunk | 947116800000000 + space2 | _hyper_6_21_chunk | -9223372036854775808 + space2 | _hyper_6_23_chunk | -9223372036854775808 + space2 | _hyper_6_25_chunk | -9223372036854775808 + space2 | _hyper_6_27_chunk | -9223372036854775808 + space2 | _hyper_6_33_chunk | -9223372036854775808 + space2 | 
_hyper_6_29_chunk | 946512000000000 + space2 | _hyper_6_31_chunk | 946512000000000 + space2 | _hyper_6_35_chunk | 946512000000000 + space2 | _hyper_6_37_chunk | 946512000000000 + space2 | _hyper_6_20_chunk | 947116800000000 + space2 | _hyper_6_22_chunk | 947116800000000 + space2 | _hyper_6_24_chunk | 947116800000000 + space2 | _hyper_6_26_chunk | 947116800000000 + space2 | _hyper_6_28_chunk | 947116800000000 + space2 | _hyper_6_30_chunk | 947116800000000 + space2 | _hyper_6_32_chunk | 947116800000000 + space2 | _hyper_6_34_chunk | 947116800000000 + space2 | _hyper_6_36_chunk | 947116800000000 + space3 | _hyper_7_39_chunk | -9223372036854775808 + space3 | _hyper_7_41_chunk | -9223372036854775808 + space3 | _hyper_7_43_chunk | -9223372036854775808 + space3 | _hyper_7_45_chunk | -9223372036854775808 + space3 | _hyper_7_47_chunk | -9223372036854775808 + space3 | _hyper_7_49_chunk | -9223372036854775808 + space3 | _hyper_7_51_chunk | -9223372036854775808 + space3 | _hyper_7_53_chunk | 946512000000000 + space3 | _hyper_7_38_chunk | 947116800000000 + space3 | _hyper_7_40_chunk | 947116800000000 + space3 | _hyper_7_42_chunk | 947116800000000 + space3 | _hyper_7_44_chunk | 947116800000000 + space3 | _hyper_7_46_chunk | 947116800000000 + space3 | _hyper_7_48_chunk | 947116800000000 + space3 | _hyper_7_50_chunk | 947116800000000 + space3 | _hyper_7_52_chunk | 947116800000000 +(55 rows) + +-- test ASC for reverse ordered chunks +:PREFIX SELECT + time, device_id, value +FROM ordered_append_reverse +ORDER BY time ASC LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ordered_append_reverse (actual rows=1 loops=1) + Order: ordered_append_reverse."time" + -> Index Scan Backward using _hyper_1_3_chunk_ordered_append_reverse_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_ordered_append_reverse_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan Backward using _hyper_1_1_chunk_ordered_append_reverse_time_idx on _hyper_1_1_chunk (never executed) +(6 rows) + +-- test DESC for reverse ordered chunks +:PREFIX SELECT + time, device_id, value +FROM ordered_append_reverse +ORDER BY time DESC LIMIT 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ordered_append_reverse (actual rows=1 loops=1) + Order: ordered_append_reverse."time" DESC + -> Index Scan using _hyper_1_1_chunk_ordered_append_reverse_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_1_2_chunk_ordered_append_reverse_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan using _hyper_1_3_chunk_ordered_append_reverse_time_idx on _hyper_1_3_chunk (never executed) +(6 rows) + +-- test query with ORDER BY time_bucket, device_id +-- must not use ordered append +:PREFIX SELECT + time_bucket('1d',time), device_id, name +FROM dimension_last +ORDER BY time_bucket('1d',time), device_id LIMIT 1; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_2_4_chunk."time")), _hyper_2_4_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5760 
loops=1) + -> Append (actual rows=5760 loops=1) + -> Seq Scan on _hyper_2_4_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_5_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_6_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=1440 loops=1) +(10 rows) + +-- test query with ORDER BY date_trunc, device_id +-- must not use ordered append +:PREFIX SELECT + date_trunc('day',time), device_id, name +FROM dimension_last +ORDER BY 1,2 LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_2_4_chunk."time")), _hyper_2_4_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5760 loops=1) + -> Append (actual rows=5760 loops=1) + -> Seq Scan on _hyper_2_4_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_5_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_6_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=1440 loops=1) +(10 rows) + +-- test with table with only dimension column +:PREFIX SELECT * FROM dimension_only ORDER BY time DESC LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on dimension_only (actual rows=1 loops=1) + Order: dimension_only."time" DESC + -> Index Only Scan using _hyper_3_11_chunk_dimension_only_time_idx on _hyper_3_11_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_3_10_chunk_dimension_only_time_idx on _hyper_3_10_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_3_9_chunk_dimension_only_time_idx on _hyper_3_9_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_3_8_chunk_dimension_only_time_idx on _hyper_3_8_chunk (never executed) + Heap Fetches: 0 +(11 rows) + +-- test LEFT JOIN against hypertable +:PREFIX_NO_ANALYZE SELECT * +FROM dimension_last +LEFT JOIN dimension_only USING (time) +ORDER BY dimension_last.time DESC +LIMIT 2; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Limit + -> Nested Loop Left Join + Join Filter: (dimension_last."time" = _hyper_3_11_chunk."time") + -> Custom Scan (ChunkAppend) on dimension_last + Order: dimension_last."time" DESC + -> Index Scan using _hyper_2_7_chunk_dimension_last_time_idx on _hyper_2_7_chunk + -> Index Scan using _hyper_2_6_chunk_dimension_last_time_idx on _hyper_2_6_chunk + -> Index Scan using _hyper_2_5_chunk_dimension_last_time_idx on _hyper_2_5_chunk + -> Index Scan using _hyper_2_4_chunk_dimension_last_time_idx on _hyper_2_4_chunk + -> Materialize + -> Append + -> Seq Scan on _hyper_3_11_chunk + -> Seq Scan on _hyper_3_10_chunk + -> Seq Scan on _hyper_3_9_chunk + -> Seq Scan on _hyper_3_8_chunk +(15 rows) + +-- test INNER JOIN against non-hypertable +:PREFIX_NO_ANALYZE SELECT * +FROM dimension_last +INNER JOIN dimension_only USING (time) +ORDER BY dimension_last.time DESC +LIMIT 2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Limit + -> Nested Loop + -> Custom Scan (ChunkAppend) on dimension_only + Order: dimension_only."time" DESC + -> Index Only Scan using _hyper_3_11_chunk_dimension_only_time_idx on _hyper_3_11_chunk + -> Index Only Scan using 
_hyper_3_10_chunk_dimension_only_time_idx on _hyper_3_10_chunk + -> Index Only Scan using _hyper_3_9_chunk_dimension_only_time_idx on _hyper_3_9_chunk + -> Index Only Scan using _hyper_3_8_chunk_dimension_only_time_idx on _hyper_3_8_chunk + -> Append + -> Index Scan using _hyper_2_7_chunk_dimension_last_time_idx on _hyper_2_7_chunk + Index Cond: ("time" = dimension_only."time") + -> Index Scan using _hyper_2_6_chunk_dimension_last_time_idx on _hyper_2_6_chunk + Index Cond: ("time" = dimension_only."time") + -> Index Scan using _hyper_2_5_chunk_dimension_last_time_idx on _hyper_2_5_chunk + Index Cond: ("time" = dimension_only."time") + -> Index Scan using _hyper_2_4_chunk_dimension_last_time_idx on _hyper_2_4_chunk + Index Cond: ("time" = dimension_only."time") +(17 rows) + +-- test join against non-hypertable +:PREFIX SELECT * +FROM dimension_last +INNER JOIN devices USING(device_id) +ORDER BY dimension_last.time DESC +LIMIT 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=2 loops=1) + -> Nested Loop (actual rows=2 loops=1) + Join Filter: (dimension_last.device_id = devices.device_id) + -> Custom Scan (ChunkAppend) on dimension_last (actual rows=2 loops=1) + Order: dimension_last."time" DESC + -> Index Scan using _hyper_2_7_chunk_dimension_last_time_idx on _hyper_2_7_chunk (actual rows=2 loops=1) + -> Index Scan using _hyper_2_6_chunk_dimension_last_time_idx on _hyper_2_6_chunk (never executed) + -> Index Scan using _hyper_2_5_chunk_dimension_last_time_idx on _hyper_2_5_chunk (never executed) + -> Index Scan using _hyper_2_4_chunk_dimension_last_time_idx on _hyper_2_4_chunk (never executed) + -> Materialize (actual rows=1 loops=2) + -> Seq Scan on devices (actual rows=1 loops=1) +(11 rows) + +-- test hypertable with index missing on one chunk +:PREFIX SELECT + time, device_id, value +FROM ht_missing_indexes +ORDER BY time ASC LIMIT 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_missing_indexes (actual rows=1 loops=1) + Order: ht_missing_indexes."time" + -> Index Scan Backward using _hyper_4_12_chunk_ht_missing_indexes_time_idx on _hyper_4_12_chunk (actual rows=1 loops=1) + -> Sort (never executed) + Sort Key: _hyper_4_13_chunk."time" + -> Seq Scan on _hyper_4_13_chunk (never executed) + -> Index Scan Backward using _hyper_4_14_chunk_ht_missing_indexes_time_idx on _hyper_4_14_chunk (never executed) +(8 rows) + +-- test hypertable with index missing on one chunk +-- and no data +:PREFIX SELECT + time, device_id, value +FROM ht_missing_indexes +WHERE device_id = 2 +ORDER BY time DESC LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_missing_indexes (actual rows=1 loops=1) + Order: ht_missing_indexes."time" DESC + -> Index Scan using _hyper_4_14_chunk_ht_missing_indexes_time_idx on _hyper_4_14_chunk (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 1 + -> Sort (never executed) + Sort Key: _hyper_4_13_chunk."time" DESC + -> Seq Scan on _hyper_4_13_chunk (never executed) + Filter: (device_id = 2) + -> Index Scan using _hyper_4_12_chunk_ht_missing_indexes_time_idx on _hyper_4_12_chunk (never executed) + Filter: (device_id = 2) +(12 
rows) + +-- test hypertable with index missing on one chunk +-- and no data +:PREFIX SELECT + time, device_id, value +FROM ht_missing_indexes +WHERE time > '2000-01-07' +ORDER BY time LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on ht_missing_indexes (actual rows=10 loops=1) + Order: ht_missing_indexes."time" + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_4_13_chunk."time" + Sort Method: top-N heapsort + -> Seq Scan on _hyper_4_13_chunk (actual rows=24477 loops=1) + Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 5763 + -> Index Scan Backward using _hyper_4_14_chunk_ht_missing_indexes_time_idx on _hyper_4_14_chunk (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) +(11 rows) + +-- test hypertable with dropped columns +:PREFIX SELECT + time, device_id, value +FROM ht_dropped_columns +ORDER BY time ASC LIMIT 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_dropped_columns (actual rows=1 loops=1) + Order: ht_dropped_columns."time" + -> Index Scan Backward using _hyper_5_15_chunk_ht_dropped_columns_time_idx on _hyper_5_15_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_5_16_chunk_ht_dropped_columns_time_idx on _hyper_5_16_chunk (never executed) + -> Index Scan Backward using _hyper_5_17_chunk_ht_dropped_columns_time_idx on _hyper_5_17_chunk (never executed) + -> Index Scan Backward using _hyper_5_18_chunk_ht_dropped_columns_time_idx on _hyper_5_18_chunk (never executed) + -> Index Scan Backward using _hyper_5_19_chunk_ht_dropped_columns_time_idx on _hyper_5_19_chunk (never executed) +(8 rows) + +-- test hypertable with dropped columns +:PREFIX SELECT + time, device_id, value +FROM ht_dropped_columns +WHERE device_id = 1 +ORDER BY time DESC; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on ht_dropped_columns (actual rows=7205 loops=1) + Order: ht_dropped_columns."time" DESC + -> Index Scan using _hyper_5_19_chunk_ht_dropped_columns_time_idx on _hyper_5_19_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_5_18_chunk_ht_dropped_columns_time_idx on _hyper_5_18_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_5_17_chunk_ht_dropped_columns_time_idx on _hyper_5_17_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_5_16_chunk_ht_dropped_columns_time_idx on _hyper_5_16_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_5_15_chunk_ht_dropped_columns_time_idx on _hyper_5_15_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) +(12 rows) + +-- test hypertable with 2 space dimensions +:PREFIX SELECT + time, device_id, value +FROM space2 +ORDER BY time DESC; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on space2 (actual rows=116649 loops=1) + Order: space2."time" DESC + -> Merge Append (actual rows=56169 loops=1) + Sort Key: _hyper_6_36_chunk."time" DESC + -> Index Scan using 
_hyper_6_36_chunk_space2_time_idx on _hyper_6_36_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_34_chunk_space2_time_idx on _hyper_6_34_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_32_chunk_space2_time_idx on _hyper_6_32_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_30_chunk_space2_time_idx on _hyper_6_30_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_28_chunk_space2_time_idx on _hyper_6_28_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_26_chunk_space2_time_idx on _hyper_6_26_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_24_chunk_space2_time_idx on _hyper_6_24_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_22_chunk_space2_time_idx on _hyper_6_22_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_20_chunk_space2_time_idx on _hyper_6_20_chunk (actual rows=6241 loops=1) + -> Merge Append (actual rows=60480 loops=1) + Sort Key: _hyper_6_37_chunk."time" DESC + -> Index Scan using _hyper_6_37_chunk_space2_time_idx on _hyper_6_37_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_35_chunk_space2_time_idx on _hyper_6_35_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_33_chunk_space2_time_idx on _hyper_6_33_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_31_chunk_space2_time_idx on _hyper_6_31_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_29_chunk_space2_time_idx on _hyper_6_29_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_27_chunk_space2_time_idx on _hyper_6_27_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_25_chunk_space2_time_idx on _hyper_6_25_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_23_chunk_space2_time_idx on _hyper_6_23_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_21_chunk_space2_time_idx on _hyper_6_21_chunk (actual rows=6720 loops=1) +(24 rows) + +-- test hypertable with 3 space dimensions +:PREFIX SELECT + time +FROM space3 +ORDER BY time DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on space3 (actual rows=103688 loops=1) + Order: space3."time" DESC + -> Merge Append (actual rows=49928 loops=1) + Sort Key: _hyper_7_52_chunk."time" DESC + -> Index Only Scan using _hyper_7_52_chunk_space3_time_idx on _hyper_7_52_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_50_chunk_space3_time_idx on _hyper_7_50_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_48_chunk_space3_time_idx on _hyper_7_48_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_46_chunk_space3_time_idx on _hyper_7_46_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_44_chunk_space3_time_idx on _hyper_7_44_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_42_chunk_space3_time_idx on _hyper_7_42_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_40_chunk_space3_time_idx on _hyper_7_40_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_38_chunk_space3_time_idx on _hyper_7_38_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Merge Append (actual rows=53760 loops=1) + Sort Key: _hyper_7_53_chunk."time" DESC + -> Index Only Scan using 
_hyper_7_53_chunk_space3_time_idx on _hyper_7_53_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_51_chunk_space3_time_idx on _hyper_7_51_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_49_chunk_space3_time_idx on _hyper_7_49_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_47_chunk_space3_time_idx on _hyper_7_47_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_45_chunk_space3_time_idx on _hyper_7_45_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_43_chunk_space3_time_idx on _hyper_7_43_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_41_chunk_space3_time_idx on _hyper_7_41_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_39_chunk_space3_time_idx on _hyper_7_39_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 +(38 rows) + +-- test COLLATION +-- cant be tested in our ci because alpine doesnt support locales +-- :PREFIX SELECT * FROM sortopt_test ORDER BY time, device COLLATE "en_US.utf8"; +-- test NULLS FIRST +:PREFIX SELECT * FROM sortopt_test ORDER BY time, device NULLS FIRST; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on sortopt_test (actual rows=12961 loops=1) + Order: sortopt_test."time", sortopt_test.device NULLS FIRST + -> Index Only Scan using _hyper_8_55_chunk_time_device_nullsfirst on _hyper_8_55_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_8_54_chunk_time_device_nullsfirst on _hyper_8_54_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 +(6 rows) + +-- test NULLS LAST +:PREFIX SELECT * FROM sortopt_test ORDER BY time, device DESC NULLS LAST; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on sortopt_test (actual rows=12961 loops=1) + Order: sortopt_test."time", sortopt_test.device DESC NULLS LAST + -> Index Only Scan using _hyper_8_55_chunk_time_device_nullslast on _hyper_8_55_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_8_54_chunk_time_device_nullslast on _hyper_8_54_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 +(6 rows) + +--generate the results into two different files +\set ECHO errors diff --git a/test/expected/plan_ordered_append-15.out b/test/expected/plan_ordered_append-15.out new file mode 100644 index 00000000000..24bf1c01da4 --- /dev/null +++ b/test/expected/plan_ordered_append-15.out @@ -0,0 +1,629 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- we run these with analyze to confirm that nodes that are not +-- needed to fulfill the limit are not executed +-- unfortunately this doesn't work on PostgreSQL 9.6 which lacks +-- the ability to turn off analyze timing summary so we run +-- them without ANALYZE on PostgreSQL 9.6, but since LATERAL plans +-- are different across versions we need version specific output +-- here anyway. 
+\set TEST_BASE_NAME plan_ordered_append +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') as "TEST_LOAD_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME", + format('%s/results/%s_results_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_OPTIMIZED", + format('%s/results/%s_results_unoptimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNOPTIMIZED" +\gset +SELECT format('\! diff -u --label "Unoptimized result" --label "Optimized result" %s %s', :'TEST_RESULTS_UNOPTIMIZED', :'TEST_RESULTS_OPTIMIZED') as "DIFF_CMD" +\gset +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\set PREFIX_NO_ANALYZE 'EXPLAIN (costs off)' +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- create a now() function for repeatable testing that always returns +-- the same timestamp. It needs to be marked STABLE +CREATE OR REPLACE FUNCTION now_s() +RETURNS timestamptz LANGUAGE PLPGSQL STABLE AS +$BODY$ +BEGIN + RETURN '2000-01-08T0:00:00+0'::timestamptz; +END; +$BODY$; +CREATE TABLE devices(device_id INT PRIMARY KEY, name TEXT); +INSERT INTO devices VALUES +(1,'Device 1'), +(2,'Device 2'), +(3,'Device 3'); +-- create a second table where we create chunks in reverse order +CREATE TABLE ordered_append_reverse(time timestamptz NOT NULL, device_id INT, value float); +SELECT create_hypertable('ordered_append_reverse','time'); + create_hypertable +------------------------------------- + (1,public,ordered_append_reverse,t) +(1 row) + +INSERT INTO ordered_append_reverse SELECT generate_series('2000-01-18'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 0.5; +-- table where dimension column is last column +CREATE TABLE IF NOT EXISTS dimension_last( + id INT8 NOT NULL, + device_id INT NOT NULL, + name TEXT NOT NULL, + time timestamptz NOT NULL +); +SELECT create_hypertable('dimension_last', 'time', chunk_time_interval => interval '1day', if_not_exists => True); + create_hypertable +----------------------------- + (2,public,dimension_last,t) +(1 row) + +-- table with only dimension column +CREATE TABLE IF NOT EXISTS dimension_only( + time timestamptz NOT NULL +); +SELECT create_hypertable('dimension_only', 'time', chunk_time_interval => interval '1day', if_not_exists => True); + create_hypertable +----------------------------- + (3,public,dimension_only,t) +(1 row) + +INSERT INTO dimension_last SELECT 1,1,'Device 1',generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-04 23:59:00+0'::timestamptz,'1m'::interval); +INSERT INTO dimension_only VALUES +('2000-01-01'), +('2000-01-03'), +('2000-01-05'), +('2000-01-07'); +ANALYZE devices; +ANALYZE ordered_append_reverse; +ANALYZE dimension_last; +ANALYZE dimension_only; +-- create hypertable with indexes not on all chunks +CREATE TABLE ht_missing_indexes(time timestamptz NOT NULL, device_id int, value float); +SELECT create_hypertable('ht_missing_indexes','time'); + create_hypertable +--------------------------------- + (4,public,ht_missing_indexes,t) +(1 row) + +INSERT INTO ht_missing_indexes SELECT generate_series('2000-01-01'::timestamptz,'2000-01-18'::timestamptz,'1m'::interval), 1, 0.5; +INSERT INTO ht_missing_indexes SELECT generate_series('2000-01-01'::timestamptz,'2000-01-18'::timestamptz,'1m'::interval), 2, 1.5; +INSERT INTO ht_missing_indexes SELECT 
generate_series('2000-01-01'::timestamptz,'2000-01-18'::timestamptz,'1m'::interval), 3, 2.5; +-- drop index from 2nd chunk of ht_missing_indexes +SELECT format('%I.%I',i.schemaname,i.indexname) AS "INDEX_NAME" +FROM _timescaledb_catalog.chunk c +INNER JOIN _timescaledb_catalog.hypertable ht ON c.hypertable_id = ht.id +INNER JOIN pg_indexes i ON i.schemaname = c.schema_name AND i.tablename=c.table_name +WHERE ht.table_name = 'ht_missing_indexes' +ORDER BY c.id LIMIT 1 OFFSET 1 \gset +DROP INDEX :INDEX_NAME; +ANALYZE ht_missing_indexes; +-- create hypertable with with dropped columns +CREATE TABLE ht_dropped_columns(c1 int, c2 int, c3 int, c4 int, c5 int, time timestamptz NOT NULL, device_id int, value float); +SELECT create_hypertable('ht_dropped_columns','time'); + create_hypertable +--------------------------------- + (5,public,ht_dropped_columns,t) +(1 row) + +ALTER TABLE ht_dropped_columns DROP COLUMN c1; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-01'::timestamptz,'2000-01-02'::timestamptz,'1m'::interval), 1, 0.5; +ALTER TABLE ht_dropped_columns DROP COLUMN c2; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-08'::timestamptz,'2000-01-09'::timestamptz,'1m'::interval), 1, 0.5; +ALTER TABLE ht_dropped_columns DROP COLUMN c3; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-15'::timestamptz,'2000-01-16'::timestamptz,'1m'::interval), 1, 0.5; +ALTER TABLE ht_dropped_columns DROP COLUMN c4; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-22'::timestamptz,'2000-01-23'::timestamptz,'1m'::interval), 1, 0.5; +ALTER TABLE ht_dropped_columns DROP COLUMN c5; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-29'::timestamptz,'2000-01-30'::timestamptz,'1m'::interval), 1, 0.5; +ANALYZE ht_dropped_columns; +CREATE TABLE space2(time timestamptz NOT NULL, device_id int NOT NULL, tag_id int NOT NULL, value float); +SELECT create_hypertable('space2','time','device_id',number_partitions:=3); + create_hypertable +--------------------- + (6,public,space2,t) +(1 row) + +SELECT add_dimension('space2','tag_id',number_partitions:=3); + add_dimension +---------------------------- + (8,public,space2,tag_id,t) +(1 row) + +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 1, 1.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 1, 2.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 3, 1, 3.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 2, 1.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 2, 2.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 3, 2, 3.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 3, 1.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 3, 2.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 3, 3, 3.5; +ANALYZE space2; +CREATE TABLE space3(time timestamptz NOT NULL, x int NOT NULL, y int NOT NULL, z int NOT NULL, 
value float); +SELECT create_hypertable('space3','time','x',number_partitions:=2); + create_hypertable +--------------------- + (7,public,space3,t) +(1 row) + +SELECT add_dimension('space3','y',number_partitions:=2); + add_dimension +------------------------ + (11,public,space3,y,t) +(1 row) + +SELECT add_dimension('space3','z',number_partitions:=2); + add_dimension +------------------------ + (12,public,space3,z,t) +(1 row) + +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 1, 1, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 1, 2, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 2, 1, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 2, 2, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 1, 1, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 1, 2, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 2, 1, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 2, 2, 1.5; +ANALYZE space3; +CREATE TABLE sortopt_test(time timestamptz NOT NULL, device TEXT); +SELECT create_hypertable('sortopt_test','time',create_default_indexes:=false); + create_hypertable +--------------------------- + (8,public,sortopt_test,t) +(1 row) + +-- since alpine does not support locales we cant test collations in our ci +-- CREATE COLLATION IF NOT EXISTS en_US(LOCALE='en_US.utf8'); +-- CREATE INDEX time_device_utf8 ON sortopt_test(time, device COLLATE "en_US"); +CREATE INDEX time_device_nullsfirst ON sortopt_test(time, device NULLS FIRST); +CREATE INDEX time_device_nullslast ON sortopt_test(time, device DESC NULLS LAST); +INSERT INTO sortopt_test SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 'Device 1'; +ANALYZE sortopt_test; +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+-- print chunks ordered by time to ensure ordering we want +SELECT + ht.table_name AS hypertable, + c.table_name AS chunk, + ds.range_start +FROM + _timescaledb_catalog.chunk c + INNER JOIN LATERAL(SELECT * FROM _timescaledb_catalog.chunk_constraint cc WHERE c.id = cc.chunk_id ORDER BY cc.dimension_slice_id LIMIT 1) cc ON true + INNER JOIN _timescaledb_catalog.dimension_slice ds ON ds.id=cc.dimension_slice_id + INNER JOIN _timescaledb_catalog.dimension d ON ds.dimension_id = d.id + INNER JOIN _timescaledb_catalog.hypertable ht ON d.hypertable_id = ht.id +ORDER BY ht.table_name, range_start, chunk; + hypertable | chunk | range_start +------------------------+-------------------+---------------------- + dimension_last | _hyper_2_4_chunk | 946684800000000 + dimension_last | _hyper_2_5_chunk | 946771200000000 + dimension_last | _hyper_2_6_chunk | 946857600000000 + dimension_last | _hyper_2_7_chunk | 946944000000000 + dimension_only | _hyper_3_8_chunk | 946684800000000 + dimension_only | _hyper_3_9_chunk | 946857600000000 + dimension_only | _hyper_3_10_chunk | 947030400000000 + dimension_only | _hyper_3_11_chunk | 947203200000000 + ht_dropped_columns | _hyper_5_15_chunk | 946512000000000 + ht_dropped_columns | _hyper_5_16_chunk | 947116800000000 + ht_dropped_columns | _hyper_5_17_chunk | 947721600000000 + ht_dropped_columns | _hyper_5_18_chunk | 948326400000000 + ht_dropped_columns | _hyper_5_19_chunk | 948931200000000 + ht_missing_indexes | _hyper_4_12_chunk | 946512000000000 + ht_missing_indexes | _hyper_4_13_chunk | 947116800000000 + ht_missing_indexes | _hyper_4_14_chunk | 947721600000000 + ordered_append_reverse | _hyper_1_3_chunk | 946512000000000 + ordered_append_reverse | _hyper_1_2_chunk | 947116800000000 + ordered_append_reverse | _hyper_1_1_chunk | 947721600000000 + sortopt_test | _hyper_8_55_chunk | 946512000000000 + sortopt_test | _hyper_8_54_chunk | 947116800000000 + space2 | _hyper_6_21_chunk | -9223372036854775808 + space2 | _hyper_6_23_chunk | -9223372036854775808 + space2 | _hyper_6_25_chunk | -9223372036854775808 + space2 | _hyper_6_27_chunk | -9223372036854775808 + space2 | _hyper_6_33_chunk | -9223372036854775808 + space2 | _hyper_6_29_chunk | 946512000000000 + space2 | _hyper_6_31_chunk | 946512000000000 + space2 | _hyper_6_35_chunk | 946512000000000 + space2 | _hyper_6_37_chunk | 946512000000000 + space2 | _hyper_6_20_chunk | 947116800000000 + space2 | _hyper_6_22_chunk | 947116800000000 + space2 | _hyper_6_24_chunk | 947116800000000 + space2 | _hyper_6_26_chunk | 947116800000000 + space2 | _hyper_6_28_chunk | 947116800000000 + space2 | _hyper_6_30_chunk | 947116800000000 + space2 | _hyper_6_32_chunk | 947116800000000 + space2 | _hyper_6_34_chunk | 947116800000000 + space2 | _hyper_6_36_chunk | 947116800000000 + space3 | _hyper_7_39_chunk | -9223372036854775808 + space3 | _hyper_7_41_chunk | -9223372036854775808 + space3 | _hyper_7_43_chunk | -9223372036854775808 + space3 | _hyper_7_45_chunk | -9223372036854775808 + space3 | _hyper_7_47_chunk | -9223372036854775808 + space3 | _hyper_7_49_chunk | -9223372036854775808 + space3 | _hyper_7_51_chunk | -9223372036854775808 + space3 | _hyper_7_53_chunk | 946512000000000 + space3 | _hyper_7_38_chunk | 947116800000000 + space3 | _hyper_7_40_chunk | 947116800000000 + space3 | _hyper_7_42_chunk | 947116800000000 + space3 | _hyper_7_44_chunk | 947116800000000 + space3 | _hyper_7_46_chunk | 947116800000000 + space3 | _hyper_7_48_chunk | 947116800000000 + space3 | _hyper_7_50_chunk | 947116800000000 + space3 | _hyper_7_52_chunk | 
947116800000000 +(55 rows) + +-- test ASC for reverse ordered chunks +:PREFIX SELECT + time, device_id, value +FROM ordered_append_reverse +ORDER BY time ASC LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ordered_append_reverse (actual rows=1 loops=1) + Order: ordered_append_reverse."time" + -> Index Scan Backward using _hyper_1_3_chunk_ordered_append_reverse_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_ordered_append_reverse_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan Backward using _hyper_1_1_chunk_ordered_append_reverse_time_idx on _hyper_1_1_chunk (never executed) +(6 rows) + +-- test DESC for reverse ordered chunks +:PREFIX SELECT + time, device_id, value +FROM ordered_append_reverse +ORDER BY time DESC LIMIT 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ordered_append_reverse (actual rows=1 loops=1) + Order: ordered_append_reverse."time" DESC + -> Index Scan using _hyper_1_1_chunk_ordered_append_reverse_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_1_2_chunk_ordered_append_reverse_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan using _hyper_1_3_chunk_ordered_append_reverse_time_idx on _hyper_1_3_chunk (never executed) +(6 rows) + +-- test query with ORDER BY time_bucket, device_id +-- must not use ordered append +:PREFIX SELECT + time_bucket('1d',time), device_id, name +FROM dimension_last +ORDER BY time_bucket('1d',time), device_id LIMIT 1; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_2_4_chunk."time")), _hyper_2_4_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5760 loops=1) + -> Append (actual rows=5760 loops=1) + -> Seq Scan on _hyper_2_4_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_5_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_6_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=1440 loops=1) +(10 rows) + +-- test query with ORDER BY date_trunc, device_id +-- must not use ordered append +:PREFIX SELECT + date_trunc('day',time), device_id, name +FROM dimension_last +ORDER BY 1,2 LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_2_4_chunk."time")), _hyper_2_4_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5760 loops=1) + -> Append (actual rows=5760 loops=1) + -> Seq Scan on _hyper_2_4_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_5_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_6_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=1440 loops=1) +(10 rows) + +-- test with table with only dimension column +:PREFIX SELECT * FROM dimension_only ORDER BY time DESC LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + 
Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on dimension_only (actual rows=1 loops=1) + Order: dimension_only."time" DESC + -> Index Only Scan using _hyper_3_11_chunk_dimension_only_time_idx on _hyper_3_11_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_3_10_chunk_dimension_only_time_idx on _hyper_3_10_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_3_9_chunk_dimension_only_time_idx on _hyper_3_9_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_3_8_chunk_dimension_only_time_idx on _hyper_3_8_chunk (never executed) + Heap Fetches: 0 +(11 rows) + +-- test LEFT JOIN against hypertable +:PREFIX_NO_ANALYZE SELECT * +FROM dimension_last +LEFT JOIN dimension_only USING (time) +ORDER BY dimension_last.time DESC +LIMIT 2; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Limit + -> Nested Loop Left Join + Join Filter: (dimension_last."time" = _hyper_3_11_chunk."time") + -> Custom Scan (ChunkAppend) on dimension_last + Order: dimension_last."time" DESC + -> Index Scan using _hyper_2_7_chunk_dimension_last_time_idx on _hyper_2_7_chunk + -> Index Scan using _hyper_2_6_chunk_dimension_last_time_idx on _hyper_2_6_chunk + -> Index Scan using _hyper_2_5_chunk_dimension_last_time_idx on _hyper_2_5_chunk + -> Index Scan using _hyper_2_4_chunk_dimension_last_time_idx on _hyper_2_4_chunk + -> Materialize + -> Append + -> Seq Scan on _hyper_3_11_chunk + -> Seq Scan on _hyper_3_10_chunk + -> Seq Scan on _hyper_3_9_chunk + -> Seq Scan on _hyper_3_8_chunk +(15 rows) + +-- test INNER JOIN against non-hypertable +:PREFIX_NO_ANALYZE SELECT * +FROM dimension_last +INNER JOIN dimension_only USING (time) +ORDER BY dimension_last.time DESC +LIMIT 2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Limit + -> Nested Loop + -> Custom Scan (ChunkAppend) on dimension_only + Order: dimension_only."time" DESC + -> Index Only Scan using _hyper_3_11_chunk_dimension_only_time_idx on _hyper_3_11_chunk + -> Index Only Scan using _hyper_3_10_chunk_dimension_only_time_idx on _hyper_3_10_chunk + -> Index Only Scan using _hyper_3_9_chunk_dimension_only_time_idx on _hyper_3_9_chunk + -> Index Only Scan using _hyper_3_8_chunk_dimension_only_time_idx on _hyper_3_8_chunk + -> Append + -> Index Scan using _hyper_2_7_chunk_dimension_last_time_idx on _hyper_2_7_chunk + Index Cond: ("time" = dimension_only."time") + -> Index Scan using _hyper_2_6_chunk_dimension_last_time_idx on _hyper_2_6_chunk + Index Cond: ("time" = dimension_only."time") + -> Index Scan using _hyper_2_5_chunk_dimension_last_time_idx on _hyper_2_5_chunk + Index Cond: ("time" = dimension_only."time") + -> Index Scan using _hyper_2_4_chunk_dimension_last_time_idx on _hyper_2_4_chunk + Index Cond: ("time" = dimension_only."time") +(17 rows) + +-- test join against non-hypertable +:PREFIX SELECT * +FROM dimension_last +INNER JOIN devices USING(device_id) +ORDER BY dimension_last.time DESC +LIMIT 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=2 loops=1) + -> Nested Loop (actual rows=2 loops=1) + Join Filter: (dimension_last.device_id = devices.device_id) + -> Custom Scan (ChunkAppend) on dimension_last (actual rows=2 loops=1) + Order: dimension_last."time" DESC + -> Index Scan using _hyper_2_7_chunk_dimension_last_time_idx on 
_hyper_2_7_chunk (actual rows=2 loops=1) + -> Index Scan using _hyper_2_6_chunk_dimension_last_time_idx on _hyper_2_6_chunk (never executed) + -> Index Scan using _hyper_2_5_chunk_dimension_last_time_idx on _hyper_2_5_chunk (never executed) + -> Index Scan using _hyper_2_4_chunk_dimension_last_time_idx on _hyper_2_4_chunk (never executed) + -> Materialize (actual rows=1 loops=2) + -> Seq Scan on devices (actual rows=1 loops=1) +(11 rows) + +-- test hypertable with index missing on one chunk +:PREFIX SELECT + time, device_id, value +FROM ht_missing_indexes +ORDER BY time ASC LIMIT 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_missing_indexes (actual rows=1 loops=1) + Order: ht_missing_indexes."time" + -> Index Scan Backward using _hyper_4_12_chunk_ht_missing_indexes_time_idx on _hyper_4_12_chunk (actual rows=1 loops=1) + -> Sort (never executed) + Sort Key: _hyper_4_13_chunk."time" + -> Seq Scan on _hyper_4_13_chunk (never executed) + -> Index Scan Backward using _hyper_4_14_chunk_ht_missing_indexes_time_idx on _hyper_4_14_chunk (never executed) +(8 rows) + +-- test hypertable with index missing on one chunk +-- and no data +:PREFIX SELECT + time, device_id, value +FROM ht_missing_indexes +WHERE device_id = 2 +ORDER BY time DESC LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_missing_indexes (actual rows=1 loops=1) + Order: ht_missing_indexes."time" DESC + -> Index Scan using _hyper_4_14_chunk_ht_missing_indexes_time_idx on _hyper_4_14_chunk (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 1 + -> Sort (never executed) + Sort Key: _hyper_4_13_chunk."time" DESC + -> Seq Scan on _hyper_4_13_chunk (never executed) + Filter: (device_id = 2) + -> Index Scan using _hyper_4_12_chunk_ht_missing_indexes_time_idx on _hyper_4_12_chunk (never executed) + Filter: (device_id = 2) +(12 rows) + +-- test hypertable with index missing on one chunk +-- and no data +:PREFIX SELECT + time, device_id, value +FROM ht_missing_indexes +WHERE time > '2000-01-07' +ORDER BY time LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on ht_missing_indexes (actual rows=10 loops=1) + Order: ht_missing_indexes."time" + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_4_13_chunk."time" + Sort Method: top-N heapsort + -> Seq Scan on _hyper_4_13_chunk (actual rows=24477 loops=1) + Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 5763 + -> Index Scan Backward using _hyper_4_14_chunk_ht_missing_indexes_time_idx on _hyper_4_14_chunk (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) +(11 rows) + +-- test hypertable with dropped columns +:PREFIX SELECT + time, device_id, value +FROM ht_dropped_columns +ORDER BY time ASC LIMIT 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_dropped_columns (actual rows=1 loops=1) + Order: ht_dropped_columns."time" + -> 
Index Scan Backward using _hyper_5_15_chunk_ht_dropped_columns_time_idx on _hyper_5_15_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_5_16_chunk_ht_dropped_columns_time_idx on _hyper_5_16_chunk (never executed) + -> Index Scan Backward using _hyper_5_17_chunk_ht_dropped_columns_time_idx on _hyper_5_17_chunk (never executed) + -> Index Scan Backward using _hyper_5_18_chunk_ht_dropped_columns_time_idx on _hyper_5_18_chunk (never executed) + -> Index Scan Backward using _hyper_5_19_chunk_ht_dropped_columns_time_idx on _hyper_5_19_chunk (never executed) +(8 rows) + +-- test hypertable with dropped columns +:PREFIX SELECT + time, device_id, value +FROM ht_dropped_columns +WHERE device_id = 1 +ORDER BY time DESC; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on ht_dropped_columns (actual rows=7205 loops=1) + Order: ht_dropped_columns."time" DESC + -> Index Scan using _hyper_5_19_chunk_ht_dropped_columns_time_idx on _hyper_5_19_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_5_18_chunk_ht_dropped_columns_time_idx on _hyper_5_18_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_5_17_chunk_ht_dropped_columns_time_idx on _hyper_5_17_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_5_16_chunk_ht_dropped_columns_time_idx on _hyper_5_16_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_5_15_chunk_ht_dropped_columns_time_idx on _hyper_5_15_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) +(12 rows) + +-- test hypertable with 2 space dimensions +:PREFIX SELECT + time, device_id, value +FROM space2 +ORDER BY time DESC; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on space2 (actual rows=116649 loops=1) + Order: space2."time" DESC + -> Merge Append (actual rows=56169 loops=1) + Sort Key: _hyper_6_36_chunk."time" DESC + -> Index Scan using _hyper_6_36_chunk_space2_time_idx on _hyper_6_36_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_34_chunk_space2_time_idx on _hyper_6_34_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_32_chunk_space2_time_idx on _hyper_6_32_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_30_chunk_space2_time_idx on _hyper_6_30_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_28_chunk_space2_time_idx on _hyper_6_28_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_26_chunk_space2_time_idx on _hyper_6_26_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_24_chunk_space2_time_idx on _hyper_6_24_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_22_chunk_space2_time_idx on _hyper_6_22_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_20_chunk_space2_time_idx on _hyper_6_20_chunk (actual rows=6241 loops=1) + -> Merge Append (actual rows=60480 loops=1) + Sort Key: _hyper_6_37_chunk."time" DESC + -> Index Scan using _hyper_6_37_chunk_space2_time_idx on _hyper_6_37_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_35_chunk_space2_time_idx on _hyper_6_35_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_33_chunk_space2_time_idx on _hyper_6_33_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_31_chunk_space2_time_idx on _hyper_6_31_chunk 
(actual rows=6720 loops=1) + -> Index Scan using _hyper_6_29_chunk_space2_time_idx on _hyper_6_29_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_27_chunk_space2_time_idx on _hyper_6_27_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_25_chunk_space2_time_idx on _hyper_6_25_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_23_chunk_space2_time_idx on _hyper_6_23_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_21_chunk_space2_time_idx on _hyper_6_21_chunk (actual rows=6720 loops=1) +(24 rows) + +-- test hypertable with 3 space dimensions +:PREFIX SELECT + time +FROM space3 +ORDER BY time DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on space3 (actual rows=103688 loops=1) + Order: space3."time" DESC + -> Merge Append (actual rows=49928 loops=1) + Sort Key: _hyper_7_52_chunk."time" DESC + -> Index Only Scan using _hyper_7_52_chunk_space3_time_idx on _hyper_7_52_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_50_chunk_space3_time_idx on _hyper_7_50_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_48_chunk_space3_time_idx on _hyper_7_48_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_46_chunk_space3_time_idx on _hyper_7_46_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_44_chunk_space3_time_idx on _hyper_7_44_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_42_chunk_space3_time_idx on _hyper_7_42_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_40_chunk_space3_time_idx on _hyper_7_40_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_38_chunk_space3_time_idx on _hyper_7_38_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Merge Append (actual rows=53760 loops=1) + Sort Key: _hyper_7_53_chunk."time" DESC + -> Index Only Scan using _hyper_7_53_chunk_space3_time_idx on _hyper_7_53_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_51_chunk_space3_time_idx on _hyper_7_51_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_49_chunk_space3_time_idx on _hyper_7_49_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_47_chunk_space3_time_idx on _hyper_7_47_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_45_chunk_space3_time_idx on _hyper_7_45_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_43_chunk_space3_time_idx on _hyper_7_43_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_41_chunk_space3_time_idx on _hyper_7_41_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_39_chunk_space3_time_idx on _hyper_7_39_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 +(38 rows) + +-- test COLLATION +-- cant be tested in our ci because alpine doesnt support locales +-- :PREFIX SELECT * FROM sortopt_test ORDER BY time, device COLLATE "en_US.utf8"; +-- test NULLS FIRST +:PREFIX SELECT * FROM sortopt_test ORDER BY time, device NULLS FIRST; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + 
Custom Scan (ChunkAppend) on sortopt_test (actual rows=12961 loops=1) + Order: sortopt_test."time", sortopt_test.device NULLS FIRST + -> Index Only Scan using _hyper_8_55_chunk_time_device_nullsfirst on _hyper_8_55_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_8_54_chunk_time_device_nullsfirst on _hyper_8_54_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 +(6 rows) + +-- test NULLS LAST +:PREFIX SELECT * FROM sortopt_test ORDER BY time, device DESC NULLS LAST; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on sortopt_test (actual rows=12961 loops=1) + Order: sortopt_test."time", sortopt_test.device DESC NULLS LAST + -> Index Only Scan using _hyper_8_55_chunk_time_device_nullslast on _hyper_8_55_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_8_54_chunk_time_device_nullslast on _hyper_8_54_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 +(6 rows) + +--generate the results into two different files +\set ECHO errors diff --git a/test/expected/plan_ordered_append-16.out b/test/expected/plan_ordered_append-16.out new file mode 100644 index 00000000000..0a17c4c8cee --- /dev/null +++ b/test/expected/plan_ordered_append-16.out @@ -0,0 +1,629 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- we run these with analyze to confirm that nodes that are not +-- needed to fulfill the limit are not executed +-- unfortunately this doesn't work on PostgreSQL 9.6 which lacks +-- the ability to turn off analyze timing summary so we run +-- them without ANALYZE on PostgreSQL 9.6, but since LATERAL plans +-- are different across versions we need version specific output +-- here anyway. +\set TEST_BASE_NAME plan_ordered_append +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') as "TEST_LOAD_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME", + format('%s/results/%s_results_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_OPTIMIZED", + format('%s/results/%s_results_unoptimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNOPTIMIZED" +\gset +SELECT format('\! diff -u --label "Unoptimized result" --label "Optimized result" %s %s', :'TEST_RESULTS_UNOPTIMIZED', :'TEST_RESULTS_OPTIMIZED') as "DIFF_CMD" +\gset +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\set PREFIX_NO_ANALYZE 'EXPLAIN (costs off)' +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- create a now() function for repeatable testing that always returns +-- the same timestamp. 
It needs to be marked STABLE +CREATE OR REPLACE FUNCTION now_s() +RETURNS timestamptz LANGUAGE PLPGSQL STABLE AS +$BODY$ +BEGIN + RETURN '2000-01-08T0:00:00+0'::timestamptz; +END; +$BODY$; +CREATE TABLE devices(device_id INT PRIMARY KEY, name TEXT); +INSERT INTO devices VALUES +(1,'Device 1'), +(2,'Device 2'), +(3,'Device 3'); +-- create a second table where we create chunks in reverse order +CREATE TABLE ordered_append_reverse(time timestamptz NOT NULL, device_id INT, value float); +SELECT create_hypertable('ordered_append_reverse','time'); + create_hypertable +------------------------------------- + (1,public,ordered_append_reverse,t) +(1 row) + +INSERT INTO ordered_append_reverse SELECT generate_series('2000-01-18'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 0.5; +-- table where dimension column is last column +CREATE TABLE IF NOT EXISTS dimension_last( + id INT8 NOT NULL, + device_id INT NOT NULL, + name TEXT NOT NULL, + time timestamptz NOT NULL +); +SELECT create_hypertable('dimension_last', 'time', chunk_time_interval => interval '1day', if_not_exists => True); + create_hypertable +----------------------------- + (2,public,dimension_last,t) +(1 row) + +-- table with only dimension column +CREATE TABLE IF NOT EXISTS dimension_only( + time timestamptz NOT NULL +); +SELECT create_hypertable('dimension_only', 'time', chunk_time_interval => interval '1day', if_not_exists => True); + create_hypertable +----------------------------- + (3,public,dimension_only,t) +(1 row) + +INSERT INTO dimension_last SELECT 1,1,'Device 1',generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-04 23:59:00+0'::timestamptz,'1m'::interval); +INSERT INTO dimension_only VALUES +('2000-01-01'), +('2000-01-03'), +('2000-01-05'), +('2000-01-07'); +ANALYZE devices; +ANALYZE ordered_append_reverse; +ANALYZE dimension_last; +ANALYZE dimension_only; +-- create hypertable with indexes not on all chunks +CREATE TABLE ht_missing_indexes(time timestamptz NOT NULL, device_id int, value float); +SELECT create_hypertable('ht_missing_indexes','time'); + create_hypertable +--------------------------------- + (4,public,ht_missing_indexes,t) +(1 row) + +INSERT INTO ht_missing_indexes SELECT generate_series('2000-01-01'::timestamptz,'2000-01-18'::timestamptz,'1m'::interval), 1, 0.5; +INSERT INTO ht_missing_indexes SELECT generate_series('2000-01-01'::timestamptz,'2000-01-18'::timestamptz,'1m'::interval), 2, 1.5; +INSERT INTO ht_missing_indexes SELECT generate_series('2000-01-01'::timestamptz,'2000-01-18'::timestamptz,'1m'::interval), 3, 2.5; +-- drop index from 2nd chunk of ht_missing_indexes +SELECT format('%I.%I',i.schemaname,i.indexname) AS "INDEX_NAME" +FROM _timescaledb_catalog.chunk c +INNER JOIN _timescaledb_catalog.hypertable ht ON c.hypertable_id = ht.id +INNER JOIN pg_indexes i ON i.schemaname = c.schema_name AND i.tablename=c.table_name +WHERE ht.table_name = 'ht_missing_indexes' +ORDER BY c.id LIMIT 1 OFFSET 1 \gset +DROP INDEX :INDEX_NAME; +ANALYZE ht_missing_indexes; +-- create hypertable with with dropped columns +CREATE TABLE ht_dropped_columns(c1 int, c2 int, c3 int, c4 int, c5 int, time timestamptz NOT NULL, device_id int, value float); +SELECT create_hypertable('ht_dropped_columns','time'); + create_hypertable +--------------------------------- + (5,public,ht_dropped_columns,t) +(1 row) + +ALTER TABLE ht_dropped_columns DROP COLUMN c1; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-01'::timestamptz,'2000-01-02'::timestamptz,'1m'::interval), 1, 0.5; 
+ALTER TABLE ht_dropped_columns DROP COLUMN c2; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-08'::timestamptz,'2000-01-09'::timestamptz,'1m'::interval), 1, 0.5; +ALTER TABLE ht_dropped_columns DROP COLUMN c3; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-15'::timestamptz,'2000-01-16'::timestamptz,'1m'::interval), 1, 0.5; +ALTER TABLE ht_dropped_columns DROP COLUMN c4; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-22'::timestamptz,'2000-01-23'::timestamptz,'1m'::interval), 1, 0.5; +ALTER TABLE ht_dropped_columns DROP COLUMN c5; +INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('2000-01-29'::timestamptz,'2000-01-30'::timestamptz,'1m'::interval), 1, 0.5; +ANALYZE ht_dropped_columns; +CREATE TABLE space2(time timestamptz NOT NULL, device_id int NOT NULL, tag_id int NOT NULL, value float); +SELECT create_hypertable('space2','time','device_id',number_partitions:=3); + create_hypertable +--------------------- + (6,public,space2,t) +(1 row) + +SELECT add_dimension('space2','tag_id',number_partitions:=3); + add_dimension +---------------------------- + (8,public,space2,tag_id,t) +(1 row) + +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 1, 1.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 1, 2.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 3, 1, 3.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 2, 1.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 2, 2.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 3, 2, 3.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 3, 1.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 3, 2.5; +INSERT INTO space2 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 3, 3, 3.5; +ANALYZE space2; +CREATE TABLE space3(time timestamptz NOT NULL, x int NOT NULL, y int NOT NULL, z int NOT NULL, value float); +SELECT create_hypertable('space3','time','x',number_partitions:=2); + create_hypertable +--------------------- + (7,public,space3,t) +(1 row) + +SELECT add_dimension('space3','y',number_partitions:=2); + add_dimension +------------------------ + (11,public,space3,y,t) +(1 row) + +SELECT add_dimension('space3','z',number_partitions:=2); + add_dimension +------------------------ + (12,public,space3,z,t) +(1 row) + +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 1, 1, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 1, 2, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 2, 1, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 2, 2, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 1, 1, 1.5; +INSERT INTO 
space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 1, 2, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 2, 1, 1.5; +INSERT INTO space3 SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 2, 2, 1.5; +ANALYZE space3; +CREATE TABLE sortopt_test(time timestamptz NOT NULL, device TEXT); +SELECT create_hypertable('sortopt_test','time',create_default_indexes:=false); + create_hypertable +--------------------------- + (8,public,sortopt_test,t) +(1 row) + +-- since alpine does not support locales we cant test collations in our ci +-- CREATE COLLATION IF NOT EXISTS en_US(LOCALE='en_US.utf8'); +-- CREATE INDEX time_device_utf8 ON sortopt_test(time, device COLLATE "en_US"); +CREATE INDEX time_device_nullsfirst ON sortopt_test(time, device NULLS FIRST); +CREATE INDEX time_device_nullslast ON sortopt_test(time, device DESC NULLS LAST); +INSERT INTO sortopt_test SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 'Device 1'; +ANALYZE sortopt_test; +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- print chunks ordered by time to ensure ordering we want +SELECT + ht.table_name AS hypertable, + c.table_name AS chunk, + ds.range_start +FROM + _timescaledb_catalog.chunk c + INNER JOIN LATERAL(SELECT * FROM _timescaledb_catalog.chunk_constraint cc WHERE c.id = cc.chunk_id ORDER BY cc.dimension_slice_id LIMIT 1) cc ON true + INNER JOIN _timescaledb_catalog.dimension_slice ds ON ds.id=cc.dimension_slice_id + INNER JOIN _timescaledb_catalog.dimension d ON ds.dimension_id = d.id + INNER JOIN _timescaledb_catalog.hypertable ht ON d.hypertable_id = ht.id +ORDER BY ht.table_name, range_start, chunk; + hypertable | chunk | range_start +------------------------+-------------------+---------------------- + dimension_last | _hyper_2_4_chunk | 946684800000000 + dimension_last | _hyper_2_5_chunk | 946771200000000 + dimension_last | _hyper_2_6_chunk | 946857600000000 + dimension_last | _hyper_2_7_chunk | 946944000000000 + dimension_only | _hyper_3_8_chunk | 946684800000000 + dimension_only | _hyper_3_9_chunk | 946857600000000 + dimension_only | _hyper_3_10_chunk | 947030400000000 + dimension_only | _hyper_3_11_chunk | 947203200000000 + ht_dropped_columns | _hyper_5_15_chunk | 946512000000000 + ht_dropped_columns | _hyper_5_16_chunk | 947116800000000 + ht_dropped_columns | _hyper_5_17_chunk | 947721600000000 + ht_dropped_columns | _hyper_5_18_chunk | 948326400000000 + ht_dropped_columns | _hyper_5_19_chunk | 948931200000000 + ht_missing_indexes | _hyper_4_12_chunk | 946512000000000 + ht_missing_indexes | _hyper_4_13_chunk | 947116800000000 + ht_missing_indexes | _hyper_4_14_chunk | 947721600000000 + ordered_append_reverse | _hyper_1_3_chunk | 946512000000000 + ordered_append_reverse | _hyper_1_2_chunk | 947116800000000 + ordered_append_reverse | _hyper_1_1_chunk | 947721600000000 + sortopt_test | _hyper_8_55_chunk | 946512000000000 + sortopt_test | _hyper_8_54_chunk | 947116800000000 + space2 | _hyper_6_21_chunk | -9223372036854775808 + space2 | _hyper_6_23_chunk | -9223372036854775808 + space2 | _hyper_6_25_chunk | -9223372036854775808 + space2 | _hyper_6_27_chunk | -9223372036854775808 + space2 | _hyper_6_33_chunk | -9223372036854775808 + space2 | 
_hyper_6_29_chunk | 946512000000000 + space2 | _hyper_6_31_chunk | 946512000000000 + space2 | _hyper_6_35_chunk | 946512000000000 + space2 | _hyper_6_37_chunk | 946512000000000 + space2 | _hyper_6_20_chunk | 947116800000000 + space2 | _hyper_6_22_chunk | 947116800000000 + space2 | _hyper_6_24_chunk | 947116800000000 + space2 | _hyper_6_26_chunk | 947116800000000 + space2 | _hyper_6_28_chunk | 947116800000000 + space2 | _hyper_6_30_chunk | 947116800000000 + space2 | _hyper_6_32_chunk | 947116800000000 + space2 | _hyper_6_34_chunk | 947116800000000 + space2 | _hyper_6_36_chunk | 947116800000000 + space3 | _hyper_7_39_chunk | -9223372036854775808 + space3 | _hyper_7_41_chunk | -9223372036854775808 + space3 | _hyper_7_43_chunk | -9223372036854775808 + space3 | _hyper_7_45_chunk | -9223372036854775808 + space3 | _hyper_7_47_chunk | -9223372036854775808 + space3 | _hyper_7_49_chunk | -9223372036854775808 + space3 | _hyper_7_51_chunk | -9223372036854775808 + space3 | _hyper_7_53_chunk | 946512000000000 + space3 | _hyper_7_38_chunk | 947116800000000 + space3 | _hyper_7_40_chunk | 947116800000000 + space3 | _hyper_7_42_chunk | 947116800000000 + space3 | _hyper_7_44_chunk | 947116800000000 + space3 | _hyper_7_46_chunk | 947116800000000 + space3 | _hyper_7_48_chunk | 947116800000000 + space3 | _hyper_7_50_chunk | 947116800000000 + space3 | _hyper_7_52_chunk | 947116800000000 +(55 rows) + +-- test ASC for reverse ordered chunks +:PREFIX SELECT + time, device_id, value +FROM ordered_append_reverse +ORDER BY time ASC LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ordered_append_reverse (actual rows=1 loops=1) + Order: ordered_append_reverse."time" + -> Index Scan Backward using _hyper_1_3_chunk_ordered_append_reverse_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_2_chunk_ordered_append_reverse_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan Backward using _hyper_1_1_chunk_ordered_append_reverse_time_idx on _hyper_1_1_chunk (never executed) +(6 rows) + +-- test DESC for reverse ordered chunks +:PREFIX SELECT + time, device_id, value +FROM ordered_append_reverse +ORDER BY time DESC LIMIT 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ordered_append_reverse (actual rows=1 loops=1) + Order: ordered_append_reverse."time" DESC + -> Index Scan using _hyper_1_1_chunk_ordered_append_reverse_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_1_2_chunk_ordered_append_reverse_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan using _hyper_1_3_chunk_ordered_append_reverse_time_idx on _hyper_1_3_chunk (never executed) +(6 rows) + +-- test query with ORDER BY time_bucket, device_id +-- must not use ordered append +:PREFIX SELECT + time_bucket('1d',time), device_id, name +FROM dimension_last +ORDER BY time_bucket('1d',time), device_id LIMIT 1; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_2_4_chunk."time")), _hyper_2_4_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5760 
loops=1) + -> Append (actual rows=5760 loops=1) + -> Seq Scan on _hyper_2_4_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_5_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_6_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=1440 loops=1) +(10 rows) + +-- test query with ORDER BY date_trunc, device_id +-- must not use ordered append +:PREFIX SELECT + date_trunc('day',time), device_id, name +FROM dimension_last +ORDER BY 1,2 LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_2_4_chunk."time")), _hyper_2_4_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5760 loops=1) + -> Append (actual rows=5760 loops=1) + -> Seq Scan on _hyper_2_4_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_5_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_6_chunk (actual rows=1440 loops=1) + -> Seq Scan on _hyper_2_7_chunk (actual rows=1440 loops=1) +(10 rows) + +-- test with table with only dimension column +:PREFIX SELECT * FROM dimension_only ORDER BY time DESC LIMIT 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on dimension_only (actual rows=1 loops=1) + Order: dimension_only."time" DESC + -> Index Only Scan using _hyper_3_11_chunk_dimension_only_time_idx on _hyper_3_11_chunk (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_3_10_chunk_dimension_only_time_idx on _hyper_3_10_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_3_9_chunk_dimension_only_time_idx on _hyper_3_9_chunk (never executed) + Heap Fetches: 0 + -> Index Only Scan using _hyper_3_8_chunk_dimension_only_time_idx on _hyper_3_8_chunk (never executed) + Heap Fetches: 0 +(11 rows) + +-- test LEFT JOIN against hypertable +:PREFIX_NO_ANALYZE SELECT * +FROM dimension_last +LEFT JOIN dimension_only USING (time) +ORDER BY dimension_last.time DESC +LIMIT 2; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Limit + -> Nested Loop Left Join + Join Filter: (dimension_last."time" = _hyper_3_11_chunk."time") + -> Custom Scan (ChunkAppend) on dimension_last + Order: dimension_last."time" DESC + -> Index Scan using _hyper_2_7_chunk_dimension_last_time_idx on _hyper_2_7_chunk + -> Index Scan using _hyper_2_6_chunk_dimension_last_time_idx on _hyper_2_6_chunk + -> Index Scan using _hyper_2_5_chunk_dimension_last_time_idx on _hyper_2_5_chunk + -> Index Scan using _hyper_2_4_chunk_dimension_last_time_idx on _hyper_2_4_chunk + -> Materialize + -> Append + -> Seq Scan on _hyper_3_11_chunk + -> Seq Scan on _hyper_3_10_chunk + -> Seq Scan on _hyper_3_9_chunk + -> Seq Scan on _hyper_3_8_chunk +(15 rows) + +-- test INNER JOIN against non-hypertable +:PREFIX_NO_ANALYZE SELECT * +FROM dimension_last +INNER JOIN dimension_only USING (time) +ORDER BY dimension_last.time DESC +LIMIT 2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Limit + -> Nested Loop + -> Custom Scan (ChunkAppend) on dimension_only + Order: dimension_only."time" DESC + -> Index Only Scan using _hyper_3_11_chunk_dimension_only_time_idx on _hyper_3_11_chunk + -> Index Only Scan using 
_hyper_3_10_chunk_dimension_only_time_idx on _hyper_3_10_chunk + -> Index Only Scan using _hyper_3_9_chunk_dimension_only_time_idx on _hyper_3_9_chunk + -> Index Only Scan using _hyper_3_8_chunk_dimension_only_time_idx on _hyper_3_8_chunk + -> Append + -> Index Scan using _hyper_2_7_chunk_dimension_last_time_idx on _hyper_2_7_chunk + Index Cond: ("time" = dimension_only."time") + -> Index Scan using _hyper_2_6_chunk_dimension_last_time_idx on _hyper_2_6_chunk + Index Cond: ("time" = dimension_only."time") + -> Index Scan using _hyper_2_5_chunk_dimension_last_time_idx on _hyper_2_5_chunk + Index Cond: ("time" = dimension_only."time") + -> Index Scan using _hyper_2_4_chunk_dimension_last_time_idx on _hyper_2_4_chunk + Index Cond: ("time" = dimension_only."time") +(17 rows) + +-- test join against non-hypertable +:PREFIX SELECT * +FROM dimension_last +INNER JOIN devices USING(device_id) +ORDER BY dimension_last.time DESC +LIMIT 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=2 loops=1) + -> Nested Loop (actual rows=2 loops=1) + Join Filter: (devices.device_id = dimension_last.device_id) + -> Custom Scan (ChunkAppend) on dimension_last (actual rows=2 loops=1) + Order: dimension_last."time" DESC + -> Index Scan using _hyper_2_7_chunk_dimension_last_time_idx on _hyper_2_7_chunk (actual rows=2 loops=1) + -> Index Scan using _hyper_2_6_chunk_dimension_last_time_idx on _hyper_2_6_chunk (never executed) + -> Index Scan using _hyper_2_5_chunk_dimension_last_time_idx on _hyper_2_5_chunk (never executed) + -> Index Scan using _hyper_2_4_chunk_dimension_last_time_idx on _hyper_2_4_chunk (never executed) + -> Materialize (actual rows=1 loops=2) + -> Seq Scan on devices (actual rows=1 loops=1) +(11 rows) + +-- test hypertable with index missing on one chunk +:PREFIX SELECT + time, device_id, value +FROM ht_missing_indexes +ORDER BY time ASC LIMIT 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_missing_indexes (actual rows=1 loops=1) + Order: ht_missing_indexes."time" + -> Index Scan Backward using _hyper_4_12_chunk_ht_missing_indexes_time_idx on _hyper_4_12_chunk (actual rows=1 loops=1) + -> Sort (never executed) + Sort Key: _hyper_4_13_chunk."time" + -> Seq Scan on _hyper_4_13_chunk (never executed) + -> Index Scan Backward using _hyper_4_14_chunk_ht_missing_indexes_time_idx on _hyper_4_14_chunk (never executed) +(8 rows) + +-- test hypertable with index missing on one chunk +-- and no data +:PREFIX SELECT + time, device_id, value +FROM ht_missing_indexes +WHERE device_id = 2 +ORDER BY time DESC LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_missing_indexes (actual rows=1 loops=1) + Order: ht_missing_indexes."time" DESC + -> Index Scan using _hyper_4_14_chunk_ht_missing_indexes_time_idx on _hyper_4_14_chunk (actual rows=1 loops=1) + Filter: (device_id = 2) + Rows Removed by Filter: 1 + -> Sort (never executed) + Sort Key: _hyper_4_13_chunk."time" DESC + -> Seq Scan on _hyper_4_13_chunk (never executed) + Filter: (device_id = 2) + -> Index Scan using _hyper_4_12_chunk_ht_missing_indexes_time_idx on _hyper_4_12_chunk (never executed) + Filter: (device_id = 2) +(12 
rows) + +-- test hypertable with index missing on one chunk +-- and no data +:PREFIX SELECT + time, device_id, value +FROM ht_missing_indexes +WHERE time > '2000-01-07' +ORDER BY time LIMIT 10; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=10 loops=1) + -> Custom Scan (ChunkAppend) on ht_missing_indexes (actual rows=10 loops=1) + Order: ht_missing_indexes."time" + -> Sort (actual rows=10 loops=1) + Sort Key: _hyper_4_13_chunk."time" + Sort Method: top-N heapsort + -> Seq Scan on _hyper_4_13_chunk (actual rows=24477 loops=1) + Filter: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 5763 + -> Index Scan Backward using _hyper_4_14_chunk_ht_missing_indexes_time_idx on _hyper_4_14_chunk (never executed) + Index Cond: ("time" > 'Fri Jan 07 00:00:00 2000 PST'::timestamp with time zone) +(11 rows) + +-- test hypertable with dropped columns +:PREFIX SELECT + time, device_id, value +FROM ht_dropped_columns +ORDER BY time ASC LIMIT 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on ht_dropped_columns (actual rows=1 loops=1) + Order: ht_dropped_columns."time" + -> Index Scan Backward using _hyper_5_15_chunk_ht_dropped_columns_time_idx on _hyper_5_15_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_5_16_chunk_ht_dropped_columns_time_idx on _hyper_5_16_chunk (never executed) + -> Index Scan Backward using _hyper_5_17_chunk_ht_dropped_columns_time_idx on _hyper_5_17_chunk (never executed) + -> Index Scan Backward using _hyper_5_18_chunk_ht_dropped_columns_time_idx on _hyper_5_18_chunk (never executed) + -> Index Scan Backward using _hyper_5_19_chunk_ht_dropped_columns_time_idx on _hyper_5_19_chunk (never executed) +(8 rows) + +-- test hypertable with dropped columns +:PREFIX SELECT + time, device_id, value +FROM ht_dropped_columns +WHERE device_id = 1 +ORDER BY time DESC; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on ht_dropped_columns (actual rows=7205 loops=1) + Order: ht_dropped_columns."time" DESC + -> Index Scan using _hyper_5_19_chunk_ht_dropped_columns_time_idx on _hyper_5_19_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_5_18_chunk_ht_dropped_columns_time_idx on _hyper_5_18_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_5_17_chunk_ht_dropped_columns_time_idx on _hyper_5_17_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_5_16_chunk_ht_dropped_columns_time_idx on _hyper_5_16_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) + -> Index Scan using _hyper_5_15_chunk_ht_dropped_columns_time_idx on _hyper_5_15_chunk (actual rows=1441 loops=1) + Filter: (device_id = 1) +(12 rows) + +-- test hypertable with 2 space dimensions +:PREFIX SELECT + time, device_id, value +FROM space2 +ORDER BY time DESC; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on space2 (actual rows=116649 loops=1) + Order: space2."time" DESC + -> Merge Append (actual rows=56169 loops=1) + Sort Key: _hyper_6_36_chunk."time" DESC + -> Index Scan using 
_hyper_6_36_chunk_space2_time_idx on _hyper_6_36_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_34_chunk_space2_time_idx on _hyper_6_34_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_32_chunk_space2_time_idx on _hyper_6_32_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_30_chunk_space2_time_idx on _hyper_6_30_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_28_chunk_space2_time_idx on _hyper_6_28_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_26_chunk_space2_time_idx on _hyper_6_26_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_24_chunk_space2_time_idx on _hyper_6_24_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_22_chunk_space2_time_idx on _hyper_6_22_chunk (actual rows=6241 loops=1) + -> Index Scan using _hyper_6_20_chunk_space2_time_idx on _hyper_6_20_chunk (actual rows=6241 loops=1) + -> Merge Append (actual rows=60480 loops=1) + Sort Key: _hyper_6_37_chunk."time" DESC + -> Index Scan using _hyper_6_37_chunk_space2_time_idx on _hyper_6_37_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_35_chunk_space2_time_idx on _hyper_6_35_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_33_chunk_space2_time_idx on _hyper_6_33_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_31_chunk_space2_time_idx on _hyper_6_31_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_29_chunk_space2_time_idx on _hyper_6_29_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_27_chunk_space2_time_idx on _hyper_6_27_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_25_chunk_space2_time_idx on _hyper_6_25_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_23_chunk_space2_time_idx on _hyper_6_23_chunk (actual rows=6720 loops=1) + -> Index Scan using _hyper_6_21_chunk_space2_time_idx on _hyper_6_21_chunk (actual rows=6720 loops=1) +(24 rows) + +-- test hypertable with 3 space dimensions +:PREFIX SELECT + time +FROM space3 +ORDER BY time DESC; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on space3 (actual rows=103688 loops=1) + Order: space3."time" DESC + -> Merge Append (actual rows=49928 loops=1) + Sort Key: _hyper_7_52_chunk."time" DESC + -> Index Only Scan using _hyper_7_52_chunk_space3_time_idx on _hyper_7_52_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_50_chunk_space3_time_idx on _hyper_7_50_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_48_chunk_space3_time_idx on _hyper_7_48_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_46_chunk_space3_time_idx on _hyper_7_46_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_44_chunk_space3_time_idx on _hyper_7_44_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_42_chunk_space3_time_idx on _hyper_7_42_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_40_chunk_space3_time_idx on _hyper_7_40_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Index Only Scan using _hyper_7_38_chunk_space3_time_idx on _hyper_7_38_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 + -> Merge Append (actual rows=53760 loops=1) + Sort Key: _hyper_7_53_chunk."time" DESC + -> Index Only Scan using 
_hyper_7_53_chunk_space3_time_idx on _hyper_7_53_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_51_chunk_space3_time_idx on _hyper_7_51_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_49_chunk_space3_time_idx on _hyper_7_49_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_47_chunk_space3_time_idx on _hyper_7_47_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_45_chunk_space3_time_idx on _hyper_7_45_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_43_chunk_space3_time_idx on _hyper_7_43_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_41_chunk_space3_time_idx on _hyper_7_41_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_7_39_chunk_space3_time_idx on _hyper_7_39_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 +(38 rows) + +-- test COLLATION +-- cant be tested in our ci because alpine doesnt support locales +-- :PREFIX SELECT * FROM sortopt_test ORDER BY time, device COLLATE "en_US.utf8"; +-- test NULLS FIRST +:PREFIX SELECT * FROM sortopt_test ORDER BY time, device NULLS FIRST; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on sortopt_test (actual rows=12961 loops=1) + Order: sortopt_test."time", sortopt_test.device NULLS FIRST + -> Index Only Scan using _hyper_8_55_chunk_time_device_nullsfirst on _hyper_8_55_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_8_54_chunk_time_device_nullsfirst on _hyper_8_54_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 +(6 rows) + +-- test NULLS LAST +:PREFIX SELECT * FROM sortopt_test ORDER BY time, device DESC NULLS LAST; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on sortopt_test (actual rows=12961 loops=1) + Order: sortopt_test."time", sortopt_test.device DESC NULLS LAST + -> Index Only Scan using _hyper_8_55_chunk_time_device_nullslast on _hyper_8_55_chunk (actual rows=6720 loops=1) + Heap Fetches: 6720 + -> Index Only Scan using _hyper_8_54_chunk_time_device_nullslast on _hyper_8_54_chunk (actual rows=6241 loops=1) + Heap Fetches: 6241 +(6 rows) + +--generate the results into two different files +\set ECHO errors diff --git a/test/expected/timestamp.out b/test/expected/timestamp-13.out similarity index 100% rename from test/expected/timestamp.out rename to test/expected/timestamp-13.out diff --git a/test/expected/timestamp-14.out b/test/expected/timestamp-14.out new file mode 100644 index 00000000000..91ad38f2d88 --- /dev/null +++ b/test/expected/timestamp-14.out @@ -0,0 +1,2057 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- Utility function for grouping/slotting time with a given interval. 
+CREATE OR REPLACE FUNCTION date_group(
+ field timestamp,
+ group_interval interval
+)
+ RETURNS timestamp LANGUAGE SQL STABLE AS
+$BODY$
+ SELECT to_timestamp((EXTRACT(EPOCH from $1)::int /
+ EXTRACT(EPOCH from group_interval)::int) *
+ EXTRACT(EPOCH from group_interval)::int)::timestamp;
+$BODY$;
+CREATE TABLE PUBLIC."testNs" (
+ "timeCustom" TIMESTAMP NOT NULL,
+ device_id TEXT NOT NULL,
+ series_0 DOUBLE PRECISION NULL,
+ series_1 DOUBLE PRECISION NULL,
+ series_2 DOUBLE PRECISION NULL,
+ series_bool BOOLEAN NULL
+);
+CREATE INDEX ON PUBLIC."testNs" (device_id, "timeCustom" DESC NULLS LAST) WHERE device_id IS NOT NULL;
+\c :TEST_DBNAME :ROLE_SUPERUSER
+CREATE SCHEMA "testNs" AUTHORIZATION :ROLE_DEFAULT_PERM_USER;
+\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
+SELECT * FROM create_hypertable('"public"."testNs"', 'timeCustom', 'device_id', 2, associated_schema_name=>'testNs' );
+WARNING: column type "timestamp without time zone" used for "timeCustom" does not follow best practices
+ hypertable_id | schema_name | table_name | created
+---------------+-------------+------------+---------
+ 1 | public | testNs | t
+(1 row)
+
+\c :TEST_DBNAME
+INSERT INTO PUBLIC."testNs"("timeCustom", device_id, series_0, series_1) VALUES
+('2009-11-12T01:00:00+00:00', 'dev1', 1.5, 1),
+('2009-11-12T01:00:00+00:00', 'dev1', 1.5, 2),
+('2009-11-10T23:00:02+00:00', 'dev1', 2.5, 3);
+INSERT INTO PUBLIC."testNs"("timeCustom", device_id, series_0, series_1) VALUES
+('2009-11-10T23:00:00+00:00', 'dev2', 1.5, 1),
+('2009-11-10T23:00:00+00:00', 'dev2', 1.5, 2);
+SELECT * FROM PUBLIC."testNs";
+ timeCustom | device_id | series_0 | series_1 | series_2 | series_bool
+--------------------------+-----------+----------+----------+----------+-------------
+ Thu Nov 12 01:00:00 2009 | dev1 | 1.5 | 1 | |
+ Thu Nov 12 01:00:00 2009 | dev1 | 1.5 | 2 | |
+ Tue Nov 10 23:00:02 2009 | dev1 | 2.5 | 3 | |
+ Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 1 | |
+ Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 2 | |
+(5 rows)
+
+SET client_min_messages = WARNING;
+\echo 'The next 2 queries will differ in output between UTC and EST since the mod is on the 100th hour UTC'
+The next 2 queries will differ in output between UTC and EST since the mod is on the 100th hour UTC
+SET timezone = 'UTC';
+SELECT date_group("timeCustom", '100 days') AS time, sum(series_0)
+FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC;
+ time | sum
+--------------------------+-----
+ Sun Sep 13 00:00:00 2009 | 8.5
+(1 row)
+
+SET timezone = 'EST';
+SELECT date_group("timeCustom", '100 days') AS time, sum(series_0)
+FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC;
+ time | sum
+--------------------------+-----
+ Sat Sep 12 19:00:00 2009 | 8.5
+(1 row)
+
+\echo 'The rest of the queries will be the same in output between UTC and EST'
+The rest of the queries will be the same in output between UTC and EST
+SET timezone = 'UTC';
+SELECT date_group("timeCustom", '1 day') AS time, sum(series_0)
+FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC;
+ time | sum
+--------------------------+-----
+ Tue Nov 10 00:00:00 2009 | 5.5
+ Thu Nov 12 00:00:00 2009 | 3
+(2 rows)
+
+SET timezone = 'EST';
+SELECT date_group("timeCustom", '1 day') AS time, sum(series_0)
+FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC;
+ time | sum
+--------------------------+-----
+ Mon Nov 09 19:00:00 2009 | 5.5
+ Wed Nov 11 19:00:00 2009 | 3
+(2 rows)
+
+SET timezone = 'UTC';
+SELECT *
+FROM PUBLIC."testNs"
+WHERE "timeCustom" >= TIMESTAMP '2009-11-10T23:00:00'
+AND "timeCustom" < TIMESTAMP '2009-11-12T01:00:00' ORDER BY "timeCustom" DESC, device_id, series_1;
+ timeCustom | device_id | series_0 | series_1 | series_2 | series_bool
+--------------------------+-----------+----------+----------+----------+-------------
+ Tue Nov 10 23:00:02 2009 | dev1 | 2.5 | 3 | |
+ Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 1 | |
+ Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 2 | |
+(3 rows)
+
+SET timezone = 'EST';
+SELECT *
+FROM PUBLIC."testNs"
+WHERE "timeCustom" >= TIMESTAMP '2009-11-10T23:00:00'
+AND "timeCustom" < TIMESTAMP '2009-11-12T01:00:00' ORDER BY "timeCustom" DESC, device_id, series_1;
+ timeCustom | device_id | series_0 | series_1 | series_2 | series_bool
+--------------------------+-----------+----------+----------+----------+-------------
+ Tue Nov 10 23:00:02 2009 | dev1 | 2.5 | 3 | |
+ Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 1 | |
+ Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 2 | |
+(3 rows)
+
+SET timezone = 'UTC';
+SELECT date_group("timeCustom", '1 day') AS time, sum(series_0)
+FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC LIMIT 2;
+ time | sum
+--------------------------+-----
+ Tue Nov 10 00:00:00 2009 | 5.5
+ Thu Nov 12 00:00:00 2009 | 3
+(2 rows)
+
+SET timezone = 'EST';
+SELECT date_group("timeCustom", '1 day') AS time, sum(series_0)
+FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC LIMIT 2;
+ time | sum
+--------------------------+-----
+ Mon Nov 09 19:00:00 2009 | 5.5
+ Wed Nov 11 19:00:00 2009 | 3
+(2 rows)
+
+------------------------------------
+-- Test time conversion functions --
+------------------------------------
+\set ON_ERROR_STOP 0
+SET timezone = 'UTC';
+-- Conversion to timestamp using Postgres built-in function taking
+-- double. Gives inaccurate result on Postgres <= 9.6.2. Accurate on
+-- Postgres >= 9.6.3.
+SELECT to_timestamp(1486480176.236538);
+ to_timestamp
+-------------------------------------
+ Tue Feb 07 15:09:36.236538 2017 UTC
+(1 row)
+
+-- extension-specific version taking microsecond UNIX timestamp
+SELECT _timescaledb_functions.to_timestamp(1486480176236538);
+ to_timestamp
+-------------------------------------
+ Tue Feb 07 15:09:36.236538 2017 UTC
+(1 row)
+
+-- Should be the inverse of the statement above.
+SELECT _timescaledb_functions.to_unix_microseconds('2017-02-07 15:09:36.236538+00');
+ to_unix_microseconds
+----------------------
+ 1486480176236538
+(1 row)
+
+-- For timestamps, BIGINT MAX represents +Infinity and BIGINT MIN
+-- -Infinity. We keep this notion for UNIX epoch time:
+SELECT _timescaledb_functions.to_unix_microseconds('+infinity');
+ERROR: invalid input syntax for type timestamp with time zone: "+infinity" at character 52
+SELECT _timescaledb_functions.to_timestamp(9223372036854775807);
+ to_timestamp
+--------------
+ infinity
+(1 row)
+
+SELECT _timescaledb_functions.to_unix_microseconds('-infinity');
+ to_unix_microseconds
+----------------------
+ -9223372036854775808
+(1 row)
+
+SELECT _timescaledb_functions.to_timestamp(-9223372036854775808);
+ to_timestamp
+--------------
+ -infinity
+(1 row)
+
+-- In UNIX microseconds, the largest bigint value below infinity
+-- (BIGINT MAX) is smaller than internal date upper bound and should
+-- therefore be OK. Further, converting to the internal postgres epoch
+-- cannot overflow a 64-bit INTEGER since the postgres epoch is at a
+-- later date compared to the UNIX epoch, and is therefore represented
+-- by a smaller number
+SELECT _timescaledb_functions.to_timestamp(9223372036854775806);
+ to_timestamp
+---------------------------------------
+ Sun Jan 10 04:00:54.775806 294247 UTC
+(1 row)
+
+-- Julian day zero is -210866803200000000 microseconds from UNIX epoch
+SELECT _timescaledb_functions.to_timestamp(-210866803200000000);
+ to_timestamp
+---------------------------------
+ Mon Nov 24 00:00:00 4714 UTC BC
+(1 row)
+
+\set VERBOSITY default
+-- Going beyond Julian day zero should give out-of-range error
+SELECT _timescaledb_functions.to_timestamp(-210866803200000001);
+ERROR: timestamp out of range
+-- Lower bound on date (should return the Julian day zero UNIX timestamp above)
+SELECT _timescaledb_functions.to_unix_microseconds('4714-11-24 00:00:00+00 BC');
+ to_unix_microseconds
+----------------------
+ -210866803200000000
+(1 row)
+
+-- Going beyond lower bound on date should return out-of-range
+SELECT _timescaledb_functions.to_unix_microseconds('4714-11-23 23:59:59.999999+00 BC');
+ERROR: timestamp out of range: "4714-11-23 23:59:59.999999+00 BC"
+LINE 1: ...ELECT _timescaledb_functions.to_unix_microseconds('4714-11-2...
+ ^
+-- The upper bound for Postgres TIMESTAMPTZ
+SELECT timestamp '294276-12-31 23:59:59.999999+00';
+ timestamp
+-----------------------------------
+ Sun Dec 31 23:59:59.999999 294276
+(1 row)
+
+-- Going beyond the upper bound, should fail
+SELECT timestamp '294276-12-31 23:59:59.999999+00' + interval '1 us';
+ERROR: timestamp out of range
+-- Cannot represent the upper bound timestamp with a UNIX microsecond timestamp
+-- since the Postgres epoch is at a later date than the UNIX epoch.
+SELECT _timescaledb_functions.to_unix_microseconds('294276-12-31 23:59:59.999999+00');
+ERROR: timestamp out of range
+-- Subtracting the difference between the two epochs (10957 days) should bring
+-- us within range.
+SELECT timestamp '294276-12-31 23:59:59.999999+00' - interval '10957 days';
+ ?column?
+-----------------------------------
+ Fri Jan 01 23:59:59.999999 294247
+(1 row)
+
+SELECT _timescaledb_functions.to_unix_microseconds('294247-01-01 23:59:59.999999');
+ to_unix_microseconds
+----------------------
+ 9223371331199999999
+(1 row)
+
+-- Adding one microsecond should take us out-of-range again
+SELECT timestamp '294247-01-01 23:59:59.999999' + interval '1 us';
+ ?column?
+----------------------------
+ Sat Jan 02 00:00:00 294247
+(1 row)
+
+SELECT _timescaledb_functions.to_unix_microseconds(timestamp '294247-01-01 23:59:59.999999' + interval '1 us');
+ERROR: timestamp out of range
+--no time_bucketing of dates not by integer # of days
+SELECT time_bucket('1 hour', DATE '2012-01-01');
+ERROR: interval must not have sub-day precision
+SELECT time_bucket('25 hour', DATE '2012-01-01');
+ERROR: interval must be a multiple of a day
+\set ON_ERROR_STOP 1
+SELECT time_bucket(INTERVAL '1 day', TIMESTAMP '2011-01-02 01:01:01');
+ time_bucket
+--------------------------
+ Sun Jan 02 00:00:00 2011
+(1 row)
+
+SELECT time, time_bucket(INTERVAL '2 day ', time)
+FROM unnest(ARRAY[
+ TIMESTAMP '2011-01-01 01:01:01',
+ TIMESTAMP '2011-01-02 01:01:01',
+ TIMESTAMP '2011-01-03 01:01:01',
+ TIMESTAMP '2011-01-04 01:01:01'
+ ]) AS time;
+ time | time_bucket
+--------------------------+--------------------------
+ Sat Jan 01 01:01:01 2011 | Sat Jan 01 00:00:00 2011
+ Sun Jan 02 01:01:01 2011 | Sat Jan 01 00:00:00 2011
+ Mon Jan 03 01:01:01 2011 | Mon Jan 03 00:00:00 2011
+ Tue Jan 04 01:01:01 2011 | Mon Jan 03 00:00:00 2011
+(4 rows)
+
+SELECT int_def, time_bucket(int_def,TIMESTAMP '2011-01-02 01:01:01.111')
+FROM unnest(ARRAY[
+ INTERVAL '1 millisecond',
+ INTERVAL '1 second',
+ INTERVAL '1 minute',
+ INTERVAL '1 hour',
+ INTERVAL '1 day',
+ INTERVAL '2 millisecond',
+ INTERVAL '2 second',
+ INTERVAL '2 minute',
+ INTERVAL '2 hour',
+ INTERVAL '2 day'
+ ]) AS int_def;
+ int_def | time_bucket
+--------------+------------------------------
+ @ 0.001 secs | Sun Jan 02 01:01:01.111 2011
+ @ 1 sec | Sun Jan 02 01:01:01 2011
+ @ 1 min | Sun Jan 02 01:01:00 2011
+ @ 1 hour | Sun Jan 02 01:00:00 2011
+ @ 1 day | Sun Jan 02 00:00:00 2011
+ @ 0.002 secs | Sun Jan 02 01:01:01.11 2011
+ @ 2 secs | Sun Jan 02 01:01:00 2011
+ @ 2 mins | Sun Jan 02 01:00:00 2011
+ @ 2 hours | Sun Jan 02 00:00:00 2011
+ @ 2 days | Sat Jan 01 00:00:00 2011
+(10 rows)
+
+\set ON_ERROR_STOP 0
+SELECT time_bucket(INTERVAL '1 year 1d',TIMESTAMP '2011-01-02 01:01:01.111');
+ERROR: month intervals cannot have day or time component
+SELECT time_bucket(INTERVAL '1 month 1 minute',TIMESTAMP '2011-01-02 01:01:01.111');
+ERROR: month intervals cannot have day or time component
+\set ON_ERROR_STOP 1
+SELECT time, time_bucket(INTERVAL '5 minute', time)
+FROM unnest(ARRAY[
+ TIMESTAMP '1970-01-01 00:59:59.999999',
+ TIMESTAMP '1970-01-01 01:01:00',
+ TIMESTAMP '1970-01-01 01:04:59.999999',
+ TIMESTAMP '1970-01-01 01:05:00'
+ ]) AS time;
+ time | time_bucket
+---------------------------------+--------------------------
+ Thu Jan 01 00:59:59.999999 1970 | Thu Jan 01 00:55:00 1970
+ Thu Jan 01 01:01:00 1970 | Thu Jan 01 01:00:00 1970
+ Thu Jan 01 01:04:59.999999 1970 | Thu Jan 01 01:00:00 1970
+ Thu Jan 01 01:05:00 1970 | Thu Jan 01 01:05:00 1970
+(4 rows)
+
+SELECT time, time_bucket(INTERVAL '5 minute', time)
+FROM unnest(ARRAY[
+ TIMESTAMP '2011-01-02 01:04:59.999999',
+ TIMESTAMP '2011-01-02 01:05:00',
+ TIMESTAMP '2011-01-02 01:09:59.999999',
+ TIMESTAMP '2011-01-02 01:10:00'
+ ]) AS time;
+ time | time_bucket
+---------------------------------+--------------------------
+ Sun Jan 02 01:04:59.999999 2011 | Sun Jan 02 01:00:00 2011
+ Sun Jan 02 01:05:00 2011 | Sun Jan 02 01:05:00 2011
+ Sun Jan 02 01:09:59.999999 2011 | Sun Jan 02 01:05:00 2011
+ Sun Jan 02 01:10:00 2011 | Sun Jan 02 01:10:00 2011
+(4 rows)
+
+--offset with interval
+SELECT time, time_bucket(INTERVAL '5 minute', time , INTERVAL '2 minutes')
+FROM unnest(ARRAY[
+ TIMESTAMP '2011-01-02 01:01:59.999999',
+ TIMESTAMP '2011-01-02 01:02:00',
+ TIMESTAMP '2011-01-02 01:06:59.999999',
+ TIMESTAMP '2011-01-02 01:07:00'
+ ]) AS time;
+ time | time_bucket
+---------------------------------+--------------------------
+ Sun Jan 02 01:01:59.999999 2011 | Sun Jan 02 00:57:00 2011
+ Sun Jan 02 01:02:00 2011 | Sun Jan 02 01:02:00 2011
+ Sun Jan 02 01:06:59.999999 2011 | Sun Jan 02 01:02:00 2011
+ Sun Jan 02 01:07:00 2011 | Sun Jan 02 01:07:00 2011
+(4 rows)
+
+SELECT time, time_bucket(INTERVAL '5 minute', time , - INTERVAL '2 minutes')
+FROM unnest(ARRAY[
+ TIMESTAMP '2011-01-02 01:02:59.999999',
+ TIMESTAMP '2011-01-02 01:03:00',
+ TIMESTAMP '2011-01-02 01:07:59.999999',
+ TIMESTAMP '2011-01-02 01:08:00'
+ ]) AS time;
+ time | time_bucket
+---------------------------------+--------------------------
+ Sun Jan 02 01:02:59.999999 2011 | Sun Jan 02 00:58:00 2011
+ Sun Jan 02 01:03:00 2011 | Sun Jan 02 01:03:00 2011
+ Sun Jan 02 01:07:59.999999 2011 | Sun Jan 02 01:03:00 2011
+ Sun Jan 02 01:08:00 2011 | Sun Jan 02 01:08:00 2011
+(4 rows)
+
+--offset with infinity
+-- timestamp
+SELECT time, time_bucket(INTERVAL '1 week', time, INTERVAL '1 day')
+FROM unnest(ARRAY[
+ timestamp '-Infinity',
+ timestamp 'Infinity'
+ ]) AS time;
+ time | time_bucket
+-----------+-------------
+ -infinity | -infinity
+ infinity | infinity
+(2 rows)
+
+-- timestamptz
+SELECT time, time_bucket(INTERVAL '1 week', time, INTERVAL '1 day')
+FROM unnest(ARRAY[
+ timestamp with time zone '-Infinity',
+ timestamp with time zone 'Infinity'
+ ]) AS time;
+ time | time_bucket
+-----------+-------------
+ -infinity | -infinity
+ infinity | infinity
+(2 rows)
+
+-- Date
+SELECT date, time_bucket(INTERVAL '1 week', date, INTERVAL '1 day')
+FROM unnest(ARRAY[
+ date '-Infinity',
+ date 'Infinity'
+ ]) AS date;
+ date | time_bucket
+-----------+-------------
+ -infinity | -infinity
+ infinity | infinity
+(2 rows)
+
+--example to align with an origin
+SELECT time, time_bucket(INTERVAL '5 minute', time - (TIMESTAMP '2011-01-02 00:02:00' - TIMESTAMP 'epoch')) + (TIMESTAMP '2011-01-02 00:02:00'-TIMESTAMP 'epoch')
+FROM unnest(ARRAY[
+ TIMESTAMP '2011-01-02 01:01:59.999999',
+ TIMESTAMP '2011-01-02 01:02:00',
+ TIMESTAMP '2011-01-02 01:06:59.999999',
+ TIMESTAMP '2011-01-02 01:07:00'
+ ]) AS time;
+ time | ?column?
+---------------------------------+--------------------------
+ Sun Jan 02 01:01:59.999999 2011 | Sun Jan 02 00:57:00 2011
+ Sun Jan 02 01:02:00 2011 | Sun Jan 02 01:02:00 2011
+ Sun Jan 02 01:06:59.999999 2011 | Sun Jan 02 01:02:00 2011
+ Sun Jan 02 01:07:00 2011 | Sun Jan 02 01:07:00 2011
+(4 rows)
+
+--rounding version
+SELECT time, time_bucket(INTERVAL '5 minute', time , - INTERVAL '2.5 minutes') + INTERVAL '2 minutes 30 seconds'
+FROM unnest(ARRAY[
+ TIMESTAMP '2011-01-02 01:05:01',
+ TIMESTAMP '2011-01-02 01:07:29',
+ TIMESTAMP '2011-01-02 01:02:30',
+ TIMESTAMP '2011-01-02 01:07:30',
+ TIMESTAMP '2011-01-02 01:02:29'
+ ]) AS time;
+ time | ?column?
+--------------------------+--------------------------
+ Sun Jan 02 01:05:01 2011 | Sun Jan 02 01:05:00 2011
+ Sun Jan 02 01:07:29 2011 | Sun Jan 02 01:05:00 2011
+ Sun Jan 02 01:02:30 2011 | Sun Jan 02 01:05:00 2011
+ Sun Jan 02 01:07:30 2011 | Sun Jan 02 01:10:00 2011
+ Sun Jan 02 01:02:29 2011 | Sun Jan 02 01:00:00 2011
+(5 rows)
+
+--time_bucket with timezone should mimick date_trunc
+SET timezone TO 'UTC';
+SELECT time, time_bucket(INTERVAL '1 hour', time), date_trunc('hour', time)
+FROM unnest(ARRAY[
+ TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01',
+ TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+01',
+ TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+02'
+ ]) AS time;
+ time | time_bucket | date_trunc
+------------------------------+------------------------------+------------------------------
+ Sun Jan 02 01:01:01 2011 UTC | Sun Jan 02 01:00:00 2011 UTC | Sun Jan 02 01:00:00 2011 UTC
+ Sun Jan 02 00:01:01 2011 UTC | Sun Jan 02 00:00:00 2011 UTC | Sun Jan 02 00:00:00 2011 UTC
+ Sat Jan 01 23:01:01 2011 UTC | Sat Jan 01 23:00:00 2011 UTC | Sat Jan 01 23:00:00 2011 UTC
+(3 rows)
+
+SELECT time, time_bucket(INTERVAL '1 day', time), date_trunc('day', time)
+FROM unnest(ARRAY[
+ TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01',
+ TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+01',
+ TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+02'
+ ]) AS time;
+ time | time_bucket | date_trunc
+------------------------------+------------------------------+------------------------------
+ Sun Jan 02 01:01:01 2011 UTC | Sun Jan 02 00:00:00 2011 UTC | Sun Jan 02 00:00:00 2011 UTC
+ Sun Jan 02 00:01:01 2011 UTC | Sun Jan 02 00:00:00 2011 UTC | Sun Jan 02 00:00:00 2011 UTC
+ Sat Jan 01 23:01:01 2011 UTC | Sat Jan 01 00:00:00 2011 UTC | Sat Jan 01 00:00:00 2011 UTC
+(3 rows)
+
+--what happens with a local tz
+SET timezone TO 'America/New_York';
+SELECT time, time_bucket(INTERVAL '1 hour', time), date_trunc('hour', time)
+FROM unnest(ARRAY[
+ TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01',
+ TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+01',
+ TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+02'
+ ]) AS time;
+ time | time_bucket | date_trunc
+------------------------------+------------------------------+------------------------------
+ Sun Jan 02 01:01:01 2011 EST | Sun Jan 02 01:00:00 2011 EST | Sun Jan 02 01:00:00 2011 EST
+ Sat Jan 01 19:01:01 2011 EST | Sat Jan 01 19:00:00 2011 EST | Sat Jan 01 19:00:00 2011 EST
+ Sat Jan 01 18:01:01 2011 EST | Sat Jan 01 18:00:00 2011 EST | Sat Jan 01 18:00:00 2011 EST
+(3 rows)
+
+--Note the timestamp tz input is aligned with UTC day /not/ local day. different than date_trunc.
+SELECT time, time_bucket(INTERVAL '1 day', time), date_trunc('day', time)
+FROM unnest(ARRAY[
+ TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01',
+ TIMESTAMP WITH TIME ZONE '2011-01-03 01:01:01+01',
+ TIMESTAMP WITH TIME ZONE '2011-01-04 01:01:01+02'
+ ]) AS time;
+ time | time_bucket | date_trunc
+------------------------------+------------------------------+------------------------------
+ Sun Jan 02 01:01:01 2011 EST | Sat Jan 01 19:00:00 2011 EST | Sun Jan 02 00:00:00 2011 EST
+ Sun Jan 02 19:01:01 2011 EST | Sun Jan 02 19:00:00 2011 EST | Sun Jan 02 00:00:00 2011 EST
+ Mon Jan 03 18:01:01 2011 EST | Sun Jan 02 19:00:00 2011 EST | Mon Jan 03 00:00:00 2011 EST
+(3 rows)
+
+--can force local bucketing with simple cast.
+SELECT time, time_bucket(INTERVAL '1 day', time::timestamp), date_trunc('day', time)
+FROM unnest(ARRAY[
+ TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01',
+ TIMESTAMP WITH TIME ZONE '2011-01-03 01:01:01+01',
+ TIMESTAMP WITH TIME ZONE '2011-01-04 01:01:01+02'
+ ]) AS time;
+ time | time_bucket | date_trunc
+------------------------------+--------------------------+------------------------------
+ Sun Jan 02 01:01:01 2011 EST | Sun Jan 02 00:00:00 2011 | Sun Jan 02 00:00:00 2011 EST
+ Sun Jan 02 19:01:01 2011 EST | Sun Jan 02 00:00:00 2011 | Sun Jan 02 00:00:00 2011 EST
+ Mon Jan 03 18:01:01 2011 EST | Mon Jan 03 00:00:00 2011 | Mon Jan 03 00:00:00 2011 EST
+(3 rows)
+
+--can also use interval to correct
+SELECT time, time_bucket(INTERVAL '1 day', time, -INTERVAL '19 hours'), date_trunc('day', time)
+FROM unnest(ARRAY[
+ TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01',
+ TIMESTAMP WITH TIME ZONE '2011-01-03 01:01:01+01',
+ TIMESTAMP WITH TIME ZONE '2011-01-04 01:01:01+02'
+ ]) AS time;
+ time | time_bucket | date_trunc
+------------------------------+------------------------------+------------------------------
+ Sun Jan 02 01:01:01 2011 EST | Sun Jan 02 00:00:00 2011 EST | Sun Jan 02 00:00:00 2011 EST
+ Sun Jan 02 19:01:01 2011 EST | Sun Jan 02 00:00:00 2011 EST | Sun Jan 02 00:00:00 2011 EST
+ Mon Jan 03 18:01:01 2011 EST | Mon Jan 03 00:00:00 2011 EST | Mon Jan 03 00:00:00 2011 EST
+(3 rows)
+
+--dst: same local hour bucketed as two different hours.
+SELECT time, time_bucket(INTERVAL '1 hour', time), date_trunc('hour', time)
+FROM unnest(ARRAY[
+ TIMESTAMP WITH TIME ZONE '2017-11-05 12:05:00+07',
+ TIMESTAMP WITH TIME ZONE '2017-11-05 13:05:00+07'
+ ]) AS time;
+ time | time_bucket | date_trunc
+------------------------------+------------------------------+------------------------------
+ Sun Nov 05 01:05:00 2017 EDT | Sun Nov 05 01:00:00 2017 EDT | Sun Nov 05 01:00:00 2017 EDT
+ Sun Nov 05 01:05:00 2017 EST | Sun Nov 05 01:00:00 2017 EST | Sun Nov 05 01:00:00 2017 EST
+(2 rows)
+
+--local alignment changes when bucketing by UTC across dst boundary
+SELECT time, time_bucket(INTERVAL '2 hour', time)
+FROM unnest(ARRAY[
+ TIMESTAMP WITH TIME ZONE '2017-11-05 10:05:00+07',
+ TIMESTAMP WITH TIME ZONE '2017-11-05 12:05:00+07',
+ TIMESTAMP WITH TIME ZONE '2017-11-05 13:05:00+07',
+ TIMESTAMP WITH TIME ZONE '2017-11-05 15:05:00+07'
+ ]) AS time;
+ time | time_bucket
+------------------------------+------------------------------
+ Sat Nov 04 23:05:00 2017 EDT | Sat Nov 04 22:00:00 2017 EDT
+ Sun Nov 05 01:05:00 2017 EDT | Sun Nov 05 00:00:00 2017 EDT
+ Sun Nov 05 01:05:00 2017 EST | Sun Nov 05 01:00:00 2017 EST
+ Sun Nov 05 03:05:00 2017 EST | Sun Nov 05 03:00:00 2017 EST
+(4 rows)
+
+--local alignment is preserved when bucketing by local time across DST boundary.
+SELECT time, time_bucket(INTERVAL '2 hour', time::timestamp)
+FROM unnest(ARRAY[
+ TIMESTAMP WITH TIME ZONE '2017-11-05 10:05:00+07',
+ TIMESTAMP WITH TIME ZONE '2017-11-05 12:05:00+07',
+ TIMESTAMP WITH TIME ZONE '2017-11-05 13:05:00+07',
+ TIMESTAMP WITH TIME ZONE '2017-11-05 15:05:00+07'
+ ]) AS time;
+ time | time_bucket
+------------------------------+--------------------------
+ Sat Nov 04 23:05:00 2017 EDT | Sat Nov 04 22:00:00 2017
+ Sun Nov 05 01:05:00 2017 EDT | Sun Nov 05 00:00:00 2017
+ Sun Nov 05 01:05:00 2017 EST | Sun Nov 05 00:00:00 2017
+ Sun Nov 05 03:05:00 2017 EST | Sun Nov 05 02:00:00 2017
+(4 rows)
+
+SELECT time,
+ time_bucket(10::smallint, time) AS time_bucket_smallint,
+ time_bucket(10::int, time) AS time_bucket_int,
+ time_bucket(10::bigint, time) AS time_bucket_bigint
+FROM unnest(ARRAY[
+ '-11',
+ '-10',
+ '-9',
+ '-1',
+ '0',
+ '1',
+ '99',
+ '100',
+ '109',
+ '110'
+ ]::smallint[]) AS time;
+ time | time_bucket_smallint | time_bucket_int | time_bucket_bigint
+------+----------------------+-----------------+--------------------
+ -11 | -20 | -20 | -20
+ -10 | -10 | -10 | -10
+ -9 | -10 | -10 | -10
+ -1 | -10 | -10 | -10
+ 0 | 0 | 0 | 0
+ 1 | 0 | 0 | 0
+ 99 | 90 | 90 | 90
+ 100 | 100 | 100 | 100
+ 109 | 100 | 100 | 100
+ 110 | 110 | 110 | 110
+(10 rows)
+
+SELECT time,
+ time_bucket(10::smallint, time, 2::smallint) AS time_bucket_smallint,
+ time_bucket(10::int, time, 2::int) AS time_bucket_int,
+ time_bucket(10::bigint, time, 2::bigint) AS time_bucket_bigint
+FROM unnest(ARRAY[
+ '-9',
+ '-8',
+ '-7',
+ '1',
+ '2',
+ '3',
+ '101',
+ '102',
+ '111',
+ '112'
+ ]::smallint[]) AS time;
+ time | time_bucket_smallint | time_bucket_int | time_bucket_bigint
+------+----------------------+-----------------+--------------------
+ -9 | -18 | -18 | -18
+ -8 | -8 | -8 | -8
+ -7 | -8 | -8 | -8
+ 1 | -8 | -8 | -8
+ 2 | 2 | 2 | 2
+ 3 | 2 | 2 | 2
+ 101 | 92 | 92 | 92
+ 102 | 102 | 102 | 102
+ 111 | 102 | 102 | 102
+ 112 | 112 | 112 | 112
+(10 rows)
+
+SELECT time,
+ time_bucket(10::smallint, time, -2::smallint) AS time_bucket_smallint,
+ time_bucket(10::int, time, -2::int) AS time_bucket_int,
+ time_bucket(10::bigint, time, -2::bigint) AS time_bucket_bigint
+FROM unnest(ARRAY[
+ '-13',
+ '-12',
+ '-11',
+ '-3',
+ '-2',
+ '-1',
+ '97',
+ '98',
+ '107',
+ '108'
+ ]::smallint[]) AS time;
+ time | time_bucket_smallint | time_bucket_int | time_bucket_bigint
+------+----------------------+-----------------+--------------------
+ -13 | -22 | -22 | -22
+ -12 | -12 | -12 | -12
+ -11 | -12 | -12 | -12
+ -3 | -12 | -12 | -12
+ -2 | -2 | -2 | -2
+ -1 | -2 | -2 | -2
+ 97 | 88 | 88 | 88
+ 98 | 98 | 98 | 98
+ 107 | 98 | 98 | 98
+ 108 | 108 | 108 | 108
+(10 rows)
+
+\set ON_ERROR_STOP 0
+SELECT time_bucket(10::smallint, '-32768'::smallint);
+ERROR: timestamp out of range
+SELECT time_bucket(10::smallint, '-32761'::smallint);
+ERROR: timestamp out of range
+select time_bucket(10::smallint, '-32768'::smallint, 1000::smallint);
+ERROR: timestamp out of range
+select time_bucket(10::smallint, '-32768'::smallint, '32767'::smallint);
+ERROR: timestamp out of range
+select time_bucket(10::smallint, '32767'::smallint, '-32768'::smallint);
+ERROR: timestamp out of range
+\set ON_ERROR_STOP 1
+SELECT time, time_bucket(10::smallint, time)
+FROM unnest(ARRAY[
+ '-32760',
+ '-32759',
+ '32767'
+ ]::smallint[]) AS time;
+ time | time_bucket
+--------+-------------
+ -32760 | -32760
+ -32759 | -32760
+ 32767 | 32760
+(3 rows)
+
+\set ON_ERROR_STOP 0
+SELECT time_bucket(10::int, '-2147483648'::int);
+ERROR: timestamp out of range
+SELECT time_bucket(10::int, '-2147483641'::int);
+ERROR: timestamp out of range
+SELECT time_bucket(1000::int, '-2147483000'::int, 1::int);
+ERROR: timestamp out of range
+SELECT time_bucket(1000::int, '-2147483648'::int, '2147483647'::int);
+ERROR: timestamp out of range
+SELECT time_bucket(1000::int, '2147483647'::int, '-2147483648'::int);
+ERROR: timestamp out of range
+\set ON_ERROR_STOP 1
+SELECT time, time_bucket(10::int, time)
+FROM unnest(ARRAY[
+ '-2147483640',
+ '-2147483639',
+ '2147483647'
+ ]::int[]) AS time;
+ time | time_bucket
+-------------+-------------
+ -2147483640 | -2147483640
+ -2147483639 | -2147483640
+ 2147483647 | 2147483640
+(3 rows)
+
+\set ON_ERROR_STOP 0
+SELECT time_bucket(10::bigint, '-9223372036854775808'::bigint);
+ERROR: timestamp out of range
+SELECT time_bucket(10::bigint, '-9223372036854775801'::bigint);
+ERROR: timestamp out of range
+SELECT time_bucket(1000::bigint, '-9223372036854775000'::bigint, 1::bigint);
+ERROR: timestamp out of range
+SELECT time_bucket(1000::bigint, '-9223372036854775808'::bigint, '9223372036854775807'::bigint);
+ERROR: timestamp out of range
+SELECT time_bucket(1000::bigint, '9223372036854775807'::bigint, '-9223372036854775808'::bigint);
+ERROR: timestamp out of range
+\set ON_ERROR_STOP 1
+SELECT time, time_bucket(10::bigint, time)
+FROM unnest(ARRAY[
+ '-9223372036854775800',
+ '-9223372036854775799',
+ '9223372036854775807'
+ ]::bigint[]) AS time;
+ time | time_bucket
+----------------------+----------------------
+ -9223372036854775800 | -9223372036854775800
+ -9223372036854775799 | -9223372036854775800
+ 9223372036854775807 | 9223372036854775800
+(3 rows)
+
+SELECT time, time_bucket(INTERVAL '1 day', time::date)
+FROM unnest(ARRAY[
+ date '2017-11-05',
+ date '2017-11-06'
+ ]) AS time;
+ time | time_bucket
+------------+-------------
+ 11-05-2017 | 11-05-2017
+ 11-06-2017 | 11-06-2017
+(2 rows)
+
+SELECT time, time_bucket(INTERVAL '4 day', time::date)
+FROM unnest(ARRAY[
+ date '2017-11-04',
+ date '2017-11-05',
+ date '2017-11-08',
+ date '2017-11-09'
+ ]) AS time;
+ time | time_bucket
+------------+-------------
+ 11-04-2017 | 11-01-2017
+ 11-05-2017 | 11-05-2017
+ 11-08-2017 | 11-05-2017
+ 11-09-2017 | 11-09-2017
+(4 rows)
+
+SELECT time, time_bucket(INTERVAL '4 day', time::date, INTERVAL '2 day')
+FROM unnest(ARRAY[
+ date '2017-11-06',
+ date '2017-11-07',
+ date '2017-11-10',
+ date '2017-11-11'
+ ]) AS time;
+ time | time_bucket
+------------+-------------
+ 11-06-2017 | 11-03-2017
+ 11-07-2017 | 11-07-2017
+ 11-10-2017 | 11-07-2017
+ 11-11-2017 | 11-11-2017
+(4 rows)
+
+-- 2019-09-24 is a Monday, and we want to ensure that time_bucket returns the week starting with a Monday as date_trunc does,
+-- Rather than a Saturday which is the date of the PostgreSQL epoch
+SELECT time, time_bucket(INTERVAL '1 week', time::date)
+FROM unnest(ARRAY[
+ date '2018-09-16',
+ date '2018-09-17',
+ date '2018-09-23',
+ date '2018-09-24'
+ ]) AS time;
+ time | time_bucket
+------------+-------------
+ 09-16-2018 | 09-10-2018
+ 09-17-2018 | 09-17-2018
+ 09-23-2018 | 09-17-2018
+ 09-24-2018 | 09-24-2018
+(4 rows)
+
+SELECT time, time_bucket(INTERVAL '1 week', time)
+FROM unnest(ARRAY[
+ timestamp without time zone '2018-09-16',
+ timestamp without time zone '2018-09-17',
+ timestamp without time zone '2018-09-23',
+ timestamp without time zone '2018-09-24'
+ ]) AS time;
+ time | time_bucket
+--------------------------+--------------------------
+ Sun Sep 16 00:00:00 2018 | Mon Sep 10 00:00:00 2018
+ Mon Sep 17 00:00:00 2018 | Mon Sep 17 00:00:00 2018
+ Sun Sep 23 00:00:00 2018 | Mon Sep 17 00:00:00 2018
+ Mon Sep 24 00:00:00 2018 | Mon Sep 24 00:00:00 2018
+(4 rows)
+
+SELECT time, time_bucket(INTERVAL '1 week', time)
+FROM unnest(ARRAY[
+ timestamp with time zone '2018-09-16',
+ timestamp with time zone '2018-09-17',
+ timestamp with time zone '2018-09-23',
+ timestamp with time zone '2018-09-24'
+ ]) AS time;
+ time | time_bucket
+------------------------------+------------------------------
+ Sun Sep 16 00:00:00 2018 EDT | Sun Sep 09 20:00:00 2018 EDT
+ Mon Sep 17 00:00:00 2018 EDT | Sun Sep 16 20:00:00 2018 EDT
+ Sun Sep 23 00:00:00 2018 EDT | Sun Sep 16 20:00:00 2018 EDT
+ Mon Sep 24 00:00:00 2018 EDT | Sun Sep 23 20:00:00 2018 EDT
+(4 rows)
+
+SELECT time, time_bucket(INTERVAL '1 week', time)
+FROM unnest(ARRAY[
+ timestamp with time zone '-Infinity',
+ timestamp with time zone 'Infinity'
+ ]) AS time;
+ time | time_bucket
+-----------+-------------
+ -infinity | -infinity
+ infinity | infinity
+(2 rows)
+
+SELECT time, time_bucket(INTERVAL '1 week', time)
+FROM unnest(ARRAY[
+ timestamp without time zone '-Infinity',
+ timestamp without time zone 'Infinity'
+ ]) AS time;
+ time | time_bucket
+-----------+-------------
+ -infinity | -infinity
+ infinity | infinity
+(2 rows)
+
+SELECT time, time_bucket(INTERVAL '1 week', time), date_trunc('week', time) = time_bucket(INTERVAL '1 week', time)
+FROM unnest(ARRAY[
+ timestamp without time zone '4714-11-24 01:01:01.0 BC',
+ timestamp without time zone '294276-12-31 23:59:59.9999'
+ ]) AS time;
+ time | time_bucket | ?column?
+---------------------------------+-----------------------------+----------
+ Mon Nov 24 01:01:01 4714 BC | Mon Nov 24 00:00:00 4714 BC | t
+ Sun Dec 31 23:59:59.9999 294276 | Mon Dec 25 00:00:00 294276 | t
+(2 rows)
+
+--1000 years later weeks still align.
+SELECT time, time_bucket(INTERVAL '1 week', time), date_trunc('week', time) = time_bucket(INTERVAL '1 week', time)
+FROM unnest(ARRAY[
+ timestamp without time zone '3018-09-14',
+ timestamp without time zone '3018-09-20',
+ timestamp without time zone '3018-09-21',
+ timestamp without time zone '3018-09-22'
+ ]) AS time;
+ time | time_bucket | ?column?
+--------------------------+--------------------------+----------
+ Mon Sep 14 00:00:00 3018 | Mon Sep 14 00:00:00 3018 | t
+ Sun Sep 20 00:00:00 3018 | Mon Sep 14 00:00:00 3018 | t
+ Mon Sep 21 00:00:00 3018 | Mon Sep 21 00:00:00 3018 | t
+ Tue Sep 22 00:00:00 3018 | Mon Sep 21 00:00:00 3018 | t
+(4 rows)
+
+--weeks align for timestamptz as well if cast to local time, (but not if done at UTC).
+SELECT time, date_trunc('week', time) = time_bucket(INTERVAL '1 week', time), date_trunc('week', time) = time_bucket(INTERVAL '1 week', time::timestamp)
+FROM unnest(ARRAY[
+ timestamp with time zone '3018-09-14',
+ timestamp with time zone '3018-09-20',
+ timestamp with time zone '3018-09-21',
+ timestamp with time zone '3018-09-22'
+ ]) AS time;
+ time | ?column? | ?column?
+------------------------------+----------+----------
+ Mon Sep 14 00:00:00 3018 EDT | f | t
+ Sun Sep 20 00:00:00 3018 EDT | f | t
+ Mon Sep 21 00:00:00 3018 EDT | f | t
+ Tue Sep 22 00:00:00 3018 EDT | f | t
+(4 rows)
+
+--check functions with origin
+--note that the default origin is at 0 UTC, using origin parameter it is easy to provide a EDT origin point
+\x
+SELECT time, time_bucket(INTERVAL '1 week', time) no_epoch,
+ time_bucket(INTERVAL '1 week', time::timestamp) no_epoch_local,
+ time_bucket(INTERVAL '1 week', time) = time_bucket(INTERVAL '1 week', time, timestamptz '2000-01-03 00:00:00+0') always_true,
+ time_bucket(INTERVAL '1 week', time, timestamptz '2000-01-01 00:00:00+0') pg_epoch,
+ time_bucket(INTERVAL '1 week', time, timestamptz 'epoch') unix_epoch,
+ time_bucket(INTERVAL '1 week', time, timestamptz '3018-09-13') custom_1,
+ time_bucket(INTERVAL '1 week', time, timestamptz '3018-09-14') custom_2
+FROM unnest(ARRAY[
+ timestamp with time zone '2000-01-01 00:00:00+0'- interval '1 second',
+ timestamp with time zone '2000-01-01 00:00:00+0',
+ timestamp with time zone '2000-01-03 00:00:00+0'- interval '1 second',
+ timestamp with time zone '2000-01-03 00:00:00+0',
+ timestamp with time zone '2000-01-01',
+ timestamp with time zone '2000-01-02',
+ timestamp with time zone '2000-01-03',
+ timestamp with time zone '3018-09-12',
+ timestamp with time zone '3018-09-13',
+ timestamp with time zone '3018-09-14',
+ timestamp with time zone '3018-09-15'
+ ]) AS time;
+-[ RECORD 1 ]--+-----------------------------
+time | Fri Dec 31 18:59:59 1999 EST
+no_epoch | Sun Dec 26 19:00:00 1999 EST
+no_epoch_local | Mon Dec 27 00:00:00 1999
+always_true | t
+pg_epoch | Fri Dec 24 19:00:00 1999 EST
+unix_epoch | Wed Dec 29 19:00:00 1999 EST
+custom_1 | Sat Dec 25 23:00:00 1999 EST
+custom_2 | Sun Dec 26 23:00:00 1999 EST
+-[ RECORD 2 ]--+-----------------------------
+time | Fri Dec 31 19:00:00 1999 EST
+no_epoch | Sun Dec 26 19:00:00 1999 EST
+no_epoch_local | Mon Dec 27 00:00:00 1999
+always_true | t
+pg_epoch | Fri Dec 31 19:00:00 1999 EST
+unix_epoch | Wed Dec 29 19:00:00 1999 EST
+custom_1 | Sat Dec 25 23:00:00 1999 EST
+custom_2 | Sun Dec 26 23:00:00 1999 EST
+-[ RECORD 3 ]--+-----------------------------
+time | Sun Jan 02 18:59:59 2000 EST
+no_epoch | Sun Dec 26 19:00:00 1999 EST
+no_epoch_local | Mon Dec 27 00:00:00 1999
+always_true | t
+pg_epoch | Fri Dec 31 19:00:00 1999 EST
+unix_epoch | Wed Dec 29 19:00:00 1999 EST
+custom_1 | Sat Jan 01 23:00:00 2000 EST
+custom_2 | Sun Dec 26 23:00:00 1999 EST
+-[ RECORD 4 ]--+-----------------------------
+time | Sun Jan 02 19:00:00 2000 EST
+no_epoch | Sun Jan 02 19:00:00 2000 EST
+no_epoch_local | Mon Dec 27 00:00:00 1999
+always_true | t
+pg_epoch | Fri Dec 31 19:00:00 1999 EST
+unix_epoch | Wed Dec 29 19:00:00 1999 EST
+custom_1 | Sat Jan 01 23:00:00 2000 EST
+custom_2 | Sun Dec 26 23:00:00 1999 EST
+-[ RECORD 5 ]--+-----------------------------
+time | Sat Jan 01 00:00:00 2000 EST
+no_epoch | Sun Dec 26 19:00:00 1999 EST
+no_epoch_local | Mon Dec 27 00:00:00 1999
+always_true | t
+pg_epoch | Fri Dec 31 19:00:00 1999 EST
+unix_epoch | Wed Dec 29 19:00:00 1999 EST
+custom_1 | Sat Dec 25 23:00:00 1999 EST
+custom_2 | Sun Dec 26 23:00:00 1999 EST
+-[ RECORD 6 ]--+-----------------------------
+time | Sun Jan 02 00:00:00 2000 EST
+no_epoch | Sun Dec 26 19:00:00 1999 EST
+no_epoch_local | Mon Dec 27 00:00:00 1999
+always_true | t
+pg_epoch | Fri Dec 31 19:00:00 1999 EST
+unix_epoch | Wed Dec 29 19:00:00 1999 EST
+custom_1 | Sat Jan 01 23:00:00 2000 EST
+custom_2 | Sun Dec 26 23:00:00 1999 EST
+-[ RECORD 7 ]--+-----------------------------
+time | Mon Jan 03 00:00:00 2000 EST
+no_epoch | Sun Jan 02 19:00:00 2000 EST
+no_epoch_local | Mon Jan 03 00:00:00 2000
+always_true | t
+pg_epoch | Fri Dec 31 19:00:00 1999 EST
+unix_epoch | Wed Dec 29 19:00:00 1999 EST
+custom_1 | Sat Jan 01 23:00:00 2000 EST
+custom_2 | Sun Jan 02 23:00:00 2000 EST
+-[ RECORD 8 ]--+-----------------------------
+time | Sat Sep 12 00:00:00 3018 EDT
+no_epoch | Sun Sep 06 20:00:00 3018 EDT
+no_epoch_local | Mon Sep 07 00:00:00 3018
+always_true | t
+pg_epoch | Fri Sep 11 20:00:00 3018 EDT
+unix_epoch | Wed Sep 09 20:00:00 3018 EDT
+custom_1 | Sun Sep 06 00:00:00 3018 EDT
+custom_2 | Mon Sep 07 00:00:00 3018 EDT
+-[ RECORD 9 ]--+-----------------------------
+time | Sun Sep 13 00:00:00 3018 EDT
+no_epoch | Sun Sep 06 20:00:00 3018 EDT
+no_epoch_local | Mon Sep 07 00:00:00 3018
+always_true | t
+pg_epoch | Fri Sep 11 20:00:00 3018 EDT
+unix_epoch | Wed Sep 09 20:00:00 3018 EDT
+custom_1 | Sun Sep 13 00:00:00 3018 EDT
+custom_2 | Mon Sep 07 00:00:00 3018 EDT
+-[ RECORD 10 ]-+-----------------------------
+time | Mon Sep 14 00:00:00 3018 EDT
+no_epoch | Sun Sep 13 20:00:00 3018 EDT
+no_epoch_local | Mon Sep 14 00:00:00 3018
+always_true | t
+pg_epoch | Fri Sep 11 20:00:00 3018 EDT
+unix_epoch | Wed Sep 09 20:00:00 3018 EDT
+custom_1 | Sun Sep 13 00:00:00 3018 EDT
+custom_2 | Mon Sep 14 00:00:00 3018 EDT
+-[ RECORD 11 ]-+-----------------------------
+time | Tue Sep 15 00:00:00 3018 EDT
+no_epoch | Sun Sep 13 20:00:00 3018 EDT
+no_epoch_local | Mon Sep 14 00:00:00 3018
+always_true | t
+pg_epoch | Fri Sep 11 20:00:00 3018 EDT
+unix_epoch | Wed Sep 09 20:00:00 3018 EDT
+custom_1 | Sun Sep 13 00:00:00 3018 EDT
+custom_2 | Mon Sep 14 00:00:00 3018 EDT
+
+SELECT time, time_bucket(INTERVAL '1 week', time) no_epoch,
+ time_bucket(INTERVAL '1 week', time) = time_bucket(INTERVAL '1 week', time, timestamp '2000-01-03 00:00:00') always_true,
+ time_bucket(INTERVAL '1 week', time, timestamp '2000-01-01 00:00:00+0') pg_epoch,
+ time_bucket(INTERVAL '1 week', time, timestamp 'epoch') unix_epoch,
+ time_bucket(INTERVAL '1 week', time, timestamp '3018-09-13') custom_1,
+ time_bucket(INTERVAL '1 week', time, timestamp '3018-09-14') custom_2
+FROM unnest(ARRAY[
+ timestamp without time zone '2000-01-01 00:00:00'- interval '1 second',
+ timestamp without time zone '2000-01-01 00:00:00',
+ timestamp without time zone '2000-01-03 00:00:00'- interval '1 second',
+ timestamp without time zone '2000-01-03 00:00:00',
+ timestamp without time zone '2000-01-01',
+ timestamp without time zone '2000-01-02',
+ timestamp without time zone '2000-01-03',
+ timestamp without time zone '3018-09-12',
+ timestamp without time zone '3018-09-13',
+ timestamp without time zone '3018-09-14',
+ timestamp without time zone '3018-09-15'
+ ]) AS time;
+-[ RECORD 1 ]-------------------------
+time | Fri Dec 31 23:59:59 1999
+no_epoch | Mon Dec 27 00:00:00 1999
+always_true | t
+pg_epoch | Sat Dec 25 00:00:00 1999
+unix_epoch | Thu Dec 30 00:00:00 1999
+custom_1 | Sun Dec 26 00:00:00 1999
+custom_2 | Mon Dec 27 00:00:00 1999
+-[ RECORD 2 ]-------------------------
+time | Sat Jan 01 00:00:00 2000
+no_epoch | Mon Dec 27 00:00:00 1999
+always_true | t
+pg_epoch | Sat Jan 01 00:00:00 2000
+unix_epoch | Thu Dec 30 00:00:00 1999
+custom_1 | Sun Dec 26 00:00:00 1999
+custom_2 | Mon Dec 27 00:00:00 1999
+-[ RECORD 3 ]-------------------------
+time | Sun Jan 02 23:59:59 2000
+no_epoch | Mon Dec 27 00:00:00 1999
+always_true | t
+pg_epoch | Sat Jan 01 00:00:00 2000
+unix_epoch | Thu Dec 30 00:00:00 1999
+custom_1 | Sun Jan 02 00:00:00 2000
+custom_2 | Mon Dec 27 00:00:00 1999
+-[ RECORD 4 ]-------------------------
+time | Mon Jan 03 00:00:00 2000
+no_epoch | Mon Jan 03 00:00:00 2000
+always_true | t
+pg_epoch | Sat Jan 01 00:00:00 2000
+unix_epoch | Thu Dec 30 00:00:00 1999
+custom_1 | Sun Jan 02 00:00:00 2000
+custom_2 | Mon Jan 03 00:00:00 2000
+-[ RECORD 5 ]-------------------------
+time | Sat Jan 01 00:00:00 2000
+no_epoch | Mon Dec 27 00:00:00 1999
+always_true | t
+pg_epoch | Sat Jan 01 00:00:00 2000
+unix_epoch | Thu Dec 30 00:00:00 1999
+custom_1 | Sun Dec 26 00:00:00 1999
+custom_2 | Mon Dec 27 00:00:00 1999
+-[ RECORD 6 ]-------------------------
+time | Sun Jan 02 00:00:00 2000
+no_epoch | Mon Dec 27 00:00:00 1999
+always_true | t
+pg_epoch | Sat Jan 01 00:00:00 2000
+unix_epoch | Thu Dec 30 00:00:00 1999
+custom_1 | Sun Jan 02 00:00:00 2000
+custom_2 | Mon Dec 27 00:00:00 1999
+-[ RECORD 7 ]-------------------------
+time | Mon Jan 03 00:00:00 2000
+no_epoch | Mon Jan 03 00:00:00 2000
+always_true | t
+pg_epoch | Sat Jan 01 00:00:00 2000
+unix_epoch | Thu Dec 30 00:00:00 1999
+custom_1 | Sun Jan 02 00:00:00 2000
+custom_2 | Mon Jan 03 00:00:00 2000
+-[ RECORD 8 ]-------------------------
+time | Sat Sep 12 00:00:00 3018
+no_epoch | Mon Sep 07 00:00:00 3018
+always_true | t
+pg_epoch | Sat Sep 12 00:00:00 3018
+unix_epoch | Thu Sep 10 00:00:00 3018
+custom_1 | Sun Sep 06 00:00:00 3018
+custom_2 | Mon Sep 07 00:00:00 3018
+-[ RECORD 9 ]-------------------------
+time | Sun Sep 13 00:00:00 3018
+no_epoch | Mon Sep 07 00:00:00 3018
+always_true | t
+pg_epoch | Sat Sep 12 00:00:00 3018
+unix_epoch | Thu Sep 10 00:00:00 3018
+custom_1 | Sun Sep 13 00:00:00 3018
+custom_2 | Mon Sep 07 00:00:00 3018
+-[ RECORD 10 ]------------------------
+time | Mon Sep 14 00:00:00 3018
+no_epoch | Mon Sep 14 00:00:00 3018
+always_true | t
+pg_epoch | Sat Sep 12 00:00:00 3018
+unix_epoch | Thu Sep 10 00:00:00 3018
+custom_1 | Sun Sep 13 00:00:00 3018
+custom_2 | Mon Sep 14 00:00:00 3018
+-[ RECORD 11 ]------------------------
+time | Tue Sep 15 00:00:00 3018
+no_epoch | Mon Sep 14 00:00:00 3018
+always_true | t
+pg_epoch | Sat Sep 12 00:00:00 3018
+unix_epoch | Thu Sep 10 00:00:00 3018
+custom_1 | Sun Sep 13 00:00:00 3018
+custom_2 | Mon Sep 14 00:00:00 3018
+
+SELECT time, time_bucket(INTERVAL '1 week', time) no_epoch,
+ time_bucket(INTERVAL '1 week', time) = time_bucket(INTERVAL '1 week', time, date '2000-01-03') always_true,
+ time_bucket(INTERVAL '1 week', time, date '2000-01-01') pg_epoch,
+ time_bucket(INTERVAL '1 week', time, (timestamp 'epoch')::date) unix_epoch,
+ time_bucket(INTERVAL '1 week', time, date '3018-09-13') custom_1,
+ time_bucket(INTERVAL '1 week', time, date '3018-09-14') custom_2
+FROM unnest(ARRAY[
+ date '1999-12-31',
+ date '2000-01-01',
+ date '2000-01-02',
+ date '2000-01-03',
+ date '3018-09-12',
+ date '3018-09-13',
+ date '3018-09-14',
+ date '3018-09-15'
+ ]) AS time;
+-[ RECORD 1 ]-----------
+time | 12-31-1999
+no_epoch | 12-27-1999
+always_true | t
+pg_epoch | 12-25-1999
+unix_epoch | 12-30-1999
+custom_1 | 12-26-1999
+custom_2 | 12-27-1999
+-[ RECORD 2 ]-----------
+time | 01-01-2000
+no_epoch | 12-27-1999
+always_true | t
+pg_epoch | 01-01-2000
+unix_epoch | 12-30-1999
+custom_1 | 12-26-1999
+custom_2 | 12-27-1999
+-[ RECORD 3 ]-----------
+time | 01-02-2000
+no_epoch | 12-27-1999
+always_true | t
+pg_epoch | 01-01-2000
+unix_epoch | 12-30-1999
+custom_1 | 01-02-2000
+custom_2 | 12-27-1999
+-[ RECORD 4 ]-----------
+time | 01-03-2000
+no_epoch | 01-03-2000
+always_true | t
+pg_epoch | 01-01-2000
+unix_epoch | 12-30-1999
+custom_1 | 01-02-2000
+custom_2 | 01-03-2000
+-[ RECORD 5 ]-----------
+time | 09-12-3018
+no_epoch | 09-07-3018
+always_true | t
+pg_epoch | 09-12-3018
+unix_epoch | 09-10-3018
+custom_1 | 09-06-3018
+custom_2 | 09-07-3018
+-[ RECORD 6 ]-----------
+time | 09-13-3018
+no_epoch | 09-07-3018
+always_true | t
+pg_epoch | 09-12-3018
+unix_epoch | 09-10-3018
+custom_1 | 09-13-3018
+custom_2 | 09-07-3018
+-[ RECORD 7 ]-----------
+time | 09-14-3018
+no_epoch | 09-14-3018
+always_true | t
+pg_epoch | 09-12-3018
+unix_epoch | 09-10-3018
+custom_1 | 09-13-3018
+custom_2 | 09-14-3018
+-[ RECORD 8 ]-----------
+time | 09-15-3018
+no_epoch | 09-14-3018
+always_true | t
+pg_epoch | 09-12-3018
+unix_epoch | 09-10-3018
+custom_1 | 09-13-3018
+custom_2 | 09-14-3018
+
+\x
+--really old origin works if date around that time
+SELECT time, time_bucket(INTERVAL '1 week', time, timestamp without time zone '4710-11-24 01:01:01.0 BC')
+FROM unnest(ARRAY[
+ timestamp without time zone '4710-11-24 01:01:01.0 BC',
+ timestamp without time zone '4710-11-25 01:01:01.0 BC',
+ timestamp without time zone '2001-01-01',
+ timestamp without time zone '3001-01-01'
+ ]) AS time;
+ time | time_bucket
+-----------------------------+-----------------------------
+ Sat Nov 24 01:01:01 4710 BC | Sat Nov 24 01:01:01 4710 BC
+ Sun Nov 25 01:01:01 4710 BC | Sat Nov 24 01:01:01 4710 BC
+ Mon Jan 01 00:00:00 2001 | Sat Dec 30 01:01:01 2000
+ Thu Jan 01 00:00:00 3001 | Sat Dec 27 01:01:01 3000
+(4 rows)
+
+SELECT time, time_bucket(INTERVAL '1 week', time, timestamp without time zone '294270-12-30 23:59:59.9999')
+FROM unnest(ARRAY[
+ timestamp without time zone '294270-12-29 23:59:59.9999',
+ timestamp without time zone '294270-12-30 23:59:59.9999',
+ timestamp without time zone '294270-12-31 23:59:59.9999',
+ timestamp without time zone '2001-01-01',
+ timestamp without time zone '3001-01-01'
+ ]) AS time;
+ time | time_bucket
+---------------------------------+---------------------------------
+ Thu Dec 29 23:59:59.9999 294270 | Fri Dec 23 23:59:59.9999 294270
+ Fri Dec 30 23:59:59.9999 294270 | Fri Dec 30 23:59:59.9999 294270
+ Sat Dec 31 23:59:59.9999 294270 | Fri Dec 30 23:59:59.9999 294270
+ Mon Jan 01 00:00:00 2001 | Fri Dec 29 23:59:59.9999 2000
+ Thu Jan 01 00:00:00 3001 | Fri Dec 26 23:59:59.9999 3000
+(5 rows)
+
+\set ON_ERROR_STOP 0
+--really old origin + very new data + long period errors
+SELECT time, time_bucket(INTERVAL '100000 day', time, timestamp without time zone '4710-11-24 01:01:01.0 BC')
+FROM unnest(ARRAY[
+ timestamp without time zone '294270-12-31 23:59:59.9999'
+ ]) AS time;
+ERROR: timestamp out of range
+SELECT time, time_bucket(INTERVAL '100000 day', time, timestamp with time zone '4710-11-25 01:01:01.0 BC')
+FROM unnest(ARRAY[
+ timestamp with time zone '294270-12-30 23:59:59.9999'
+ ]) AS time;
+ERROR: timestamp out of range
+--really high origin + old data + long period errors out
+SELECT time, time_bucket(INTERVAL '10000000 day', time, timestamp without time zone '294270-12-31 23:59:59.9999')
+FROM unnest(ARRAY[
+ timestamp without time zone '4710-11-24 01:01:01.0 BC'
+ ]) AS time;
+ERROR: timestamp out of range
+SELECT time, time_bucket(INTERVAL '10000000 day', time, timestamp with time zone '294270-12-31 23:59:59.9999')
+FROM unnest(ARRAY[
+ timestamp with time zone '4710-11-24 01:01:01.0 BC'
+ ]) AS time;
+ERROR: timestamp out of range
+\set ON_ERROR_STOP 1
+-------------------------------------------
+--- Test time_bucket with month periods ---
+-------------------------------------------
+SET datestyle TO ISO;
+SELECT
+ time::date,
+ time_bucket('1 month', time::date) AS "1m",
+ time_bucket('2 month', time::date) AS "2m",
+ time_bucket('3 month', time::date) AS "3m",
+ time_bucket('1 month', time::date, '2000-02-01'::date) AS "1m origin",
+ time_bucket('2 month', time::date, '2000-02-01'::date) AS "2m origin",
+ time_bucket('3 month', time::date, '2000-02-01'::date) AS "3m origin"
+FROM generate_series('1990-01-03'::date,'1990-06-03'::date,'1month'::interval) time;
+ time | 1m | 2m | 3m | 1m origin | 2m origin | 3m origin
+------------+------------+------------+------------+------------+------------+------------
+ 1990-01-03 | 1990-01-01 | 1990-01-01 | 1990-01-01 | 1990-01-01 | 1989-12-01 | 1989-11-01
+ 1990-02-03 | 1990-02-01 | 1990-01-01 | 1990-01-01 | 1990-02-01 | 1990-02-01 | 1990-02-01
+ 1990-03-03 | 1990-03-01 | 1990-03-01 | 1990-01-01 | 1990-03-01 | 1990-02-01 | 1990-02-01
+ 1990-04-03 | 1990-04-01 | 1990-03-01 | 1990-04-01 | 1990-04-01 | 1990-04-01 | 1990-02-01
+ 1990-05-03 | 1990-05-01 | 1990-05-01 | 1990-04-01 | 1990-05-01 | 1990-04-01 | 1990-05-01
+ 1990-06-03 | 1990-06-01 | 1990-05-01 | 1990-04-01 | 1990-06-01 | 1990-06-01 | 1990-05-01
+(6 rows)
+
+SELECT
+ time,
+ time_bucket('1 month', time) AS "1m",
+ time_bucket('2 month', time) AS "2m",
+ time_bucket('3 month', time) AS "3m",
+ time_bucket('1 month', time, '2000-02-01'::timestamp) AS "1m origin",
+ time_bucket('2 month', time, '2000-02-01'::timestamp) AS "2m origin",
+ time_bucket('3 month', time, '2000-02-01'::timestamp) AS "3m origin"
+FROM generate_series('1990-01-03'::timestamp,'1990-06-03'::timestamp,'1month'::interval) time;
+ time | 1m | 2m | 3m | 1m origin | 2m origin | 3m origin
+---------------------+---------------------+---------------------+---------------------+---------------------+---------------------+---------------------
+ 1990-01-03 00:00:00 | 1990-01-01 00:00:00 | 1990-01-01 00:00:00 | 1990-01-01 00:00:00 | 1990-01-01 00:00:00 | 1989-12-01 00:00:00 | 1989-11-01 00:00:00
+ 1990-02-03 00:00:00 | 1990-02-01 00:00:00 | 1990-01-01 00:00:00 | 1990-01-01 00:00:00 | 1990-02-01 00:00:00 | 1990-02-01 00:00:00 | 1990-02-01 00:00:00
+ 1990-03-03 00:00:00 | 1990-03-01 00:00:00 | 1990-03-01 00:00:00 | 1990-01-01 00:00:00 | 1990-03-01 00:00:00 | 1990-02-01 00:00:00 | 1990-02-01 00:00:00
+ 1990-04-03 00:00:00 | 1990-04-01 00:00:00 | 1990-03-01 00:00:00 | 1990-04-01 00:00:00 | 1990-04-01 00:00:00 | 1990-04-01 00:00:00 | 1990-02-01 00:00:00
+ 1990-05-03 00:00:00 | 1990-05-01 00:00:00 | 1990-05-01 00:00:00 | 1990-04-01 00:00:00 | 1990-05-01 00:00:00 | 1990-04-01 00:00:00 | 1990-05-01 00:00:00
+ 1990-06-03 00:00:00 | 1990-06-01 00:00:00 | 1990-05-01 00:00:00 | 1990-04-01 00:00:00 | 1990-06-01 00:00:00 | 1990-06-01 00:00:00 | 1990-05-01 00:00:00
+(6 rows)
+
+SELECT
+ time,
+ time_bucket('1 month', time) AS "1m",
+ time_bucket('2 month', time) AS "2m",
+ time_bucket('3 month', time) AS "3m",
+ time_bucket('1 month', time, '2000-02-01'::timestamptz) AS "1m origin",
+ time_bucket('2 month', time, '2000-02-01'::timestamptz) AS "2m origin",
+ time_bucket('3 month', time, '2000-02-01'::timestamptz) AS "3m origin"
+FROM generate_series('1990-01-03'::timestamptz,'1990-06-03'::timestamptz,'1month'::interval) time;
+ time | 1m | 2m | 3m | 1m origin | 2m origin | 3m origin
+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------
+ 1990-01-03 00:00:00-05 | 1989-12-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1989-11-30 19:00:00-05 | 1989-10-31 19:00:00-05
+ 1990-02-03 00:00:00-05 | 1990-01-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1990-01-31 19:00:00-05 | 1990-01-31 19:00:00-05 | 1990-01-31 19:00:00-05
+ 1990-03-03 00:00:00-05 | 1990-02-28 19:00:00-05 | 1990-02-28 19:00:00-05 | 1989-12-31 19:00:00-05 | 1990-02-28 19:00:00-05 | 1990-01-31 19:00:00-05 | 1990-01-31 19:00:00-05
+ 1990-04-03 00:00:00-04 | 1990-03-31 19:00:00-05 | 1990-02-28 19:00:00-05 | 1990-03-31 19:00:00-05 | 1990-03-31 19:00:00-05 | 1990-03-31 19:00:00-05 | 1990-01-31 19:00:00-05
+ 1990-05-03 00:00:00-04 | 1990-04-30 20:00:00-04 | 1990-04-30 20:00:00-04 | 1990-03-31 19:00:00-05 | 1990-04-30 20:00:00-04 | 1990-03-31 19:00:00-05 | 1990-04-30 20:00:00-04
+ 1990-06-03 00:00:00-04 | 1990-05-31 20:00:00-04 | 1990-04-30 20:00:00-04 | 1990-03-31 19:00:00-05 | 1990-05-31 20:00:00-04 | 1990-05-31 20:00:00-04 | 1990-04-30 20:00:00-04
+(6 rows)
+
+---------------------------------------
+--- Test time_bucket with timezones ---
+---------------------------------------
+-- test NULL args
+SELECT
+time_bucket(NULL::interval,now(),'Europe/Berlin'),
+time_bucket('1day',NULL::timestamptz,'Europe/Berlin'),
+time_bucket('1day',now(),NULL::text),
+time_bucket('1day','2020-02-03','Europe/Berlin',NULL),
+time_bucket('1day','2020-02-03','Europe/Berlin','2020-04-01',NULL),
+time_bucket('1day','2020-02-03','Europe/Berlin',NULL,NULL),
+time_bucket('1day','2020-02-03','Europe/Berlin',"offset":=NULL::interval),
+time_bucket('1day','2020-02-03','Europe/Berlin',origin:=NULL::timestamptz);
+ time_bucket | time_bucket | time_bucket | time_bucket | time_bucket | time_bucket | time_bucket | time_bucket
+-------------+-------------+-------------+------------------------+------------------------+------------------------+------------------------+------------------------
+ | | | 2020-02-02 18:00:00-05 | 2020-02-03 00:00:00-05 | 2020-02-02 18:00:00-05 | 2020-02-02 18:00:00-05 | 2020-02-02 18:00:00-05
+(1 row)
+
+SET datestyle TO ISO;
+SELECT
+ time_bucket('1day', ts) AS "UTC",
+ time_bucket('1day', ts, 'Europe/Berlin') AS "Berlin",
+ time_bucket('1day', ts, 'Europe/London') AS "London",
+ time_bucket('1day', ts, 'America/New_York') AS "New York",
+ time_bucket('1day', ts, 'PST') AS "PST",
+ time_bucket('1day', ts, current_setting('timezone')) AS "current"
+FROM generate_series('1999-12-31 17:00'::timestamptz,'2000-01-02 3:00'::timestamptz, '1hour'::interval) ts;
+ UTC | Berlin | London | New York | PST | current
+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------
+ 1999-12-30 19:00:00-05 | 1999-12-30 18:00:00-05 | 1999-12-30 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05
+ 1999-12-30 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-30 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-31 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-31 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-31 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 1999-12-31 19:00:00-05 | 2000-01-01 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05
+ 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-02 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-02 00:00:00-05
+ 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-02 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-02 00:00:00-05
+ 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-02 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-02 00:00:00-05
+ 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-02 00:00:00-05 | 2000-01-02 03:00:00-05 | 2000-01-02 00:00:00-05
+(35 rows)
+
+SELECT
+ time_bucket('1month', ts) AS "UTC",
+ time_bucket('1month', ts, 'Europe/Berlin') AS "Berlin",
+ time_bucket('1month', ts, 'America/New_York') AS "New York",
+ time_bucket('1month', ts, current_setting('timezone')) AS "current",
+ time_bucket('2month', ts, current_setting('timezone')) AS "2m",
+ time_bucket('2month', ts, current_setting('timezone'), '2000-02-01'::timestamp) AS "2m origin",
+ time_bucket('2month', ts, current_setting('timezone'), "offset":='14 day'::interval) AS "2m offset",
+ time_bucket('2month', ts, current_setting('timezone'), '2000-02-01'::timestamp, '7 day'::interval) AS "2m offset + origin"
+FROM generate_series('1999-12-01'::timestamptz,'2000-09-01'::timestamptz, '9 day'::interval) ts;
+ UTC | Berlin | New York | current | 2m | 2m origin | 2m offset | 2m offset + origin
+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------
+ 1999-11-30 19:00:00-05 | 1999-11-30 18:00:00-05 | 1999-12-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-10-08 00:00:00-04
+ 1999-11-30 19:00:00-05 | 1999-11-30 18:00:00-05 | 1999-12-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-12-08 00:00:00-05
+ 1999-11-30 19:00:00-05 | 1999-11-30 18:00:00-05 | 1999-12-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-12-08 00:00:00-05
+ 1999-11-30 19:00:00-05 | 1999-11-30 18:00:00-05 | 1999-12-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-12-08 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-12-08 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 1999-12-08 00:00:00-05
+ 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 1999-12-08 00:00:00-05
+ 2000-01-31 19:00:00-05 | 2000-01-31 18:00:00-05 | 2000-02-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 1999-12-08 00:00:00-05
+ 2000-01-31 19:00:00-05 | 2000-01-31 18:00:00-05 | 2000-02-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 2000-02-08 00:00:00-05
+ 2000-01-31 19:00:00-05 | 2000-01-31 18:00:00-05 | 2000-02-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 2000-02-08 00:00:00-05
+ 2000-01-31 19:00:00-05 | 2000-01-31 18:00:00-05 | 2000-02-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 2000-02-08 00:00:00-05
+ 2000-02-29 19:00:00-05 | 2000-02-29 18:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 2000-02-08 00:00:00-05
+ 2000-02-29 19:00:00-05 | 2000-02-29 18:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-02-08 00:00:00-05
+ 2000-02-29 19:00:00-05 | 2000-02-29 18:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-02-08 00:00:00-05
+ 2000-03-31 19:00:00-05 | 2000-03-31 17:00:00-05 | 2000-04-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-02-08 00:00:00-05
+ 2000-03-31 19:00:00-05 | 2000-03-31 17:00:00-05 | 2000-04-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-04-08 00:00:00-04
+ 2000-03-31 19:00:00-05 | 2000-03-31 17:00:00-05 | 2000-04-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-04-08 00:00:00-04
+ 2000-04-30 20:00:00-04 | 2000-04-30 18:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-04-08 00:00:00-04
+ 2000-04-30 20:00:00-04 | 2000-04-30 18:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-04-08 00:00:00-04
+ 2000-04-30 20:00:00-04 | 2000-04-30 18:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-04-01 00:00:00-05 | 2000-05-15 00:00:00-04 | 2000-04-08 00:00:00-04
+ 2000-04-30 20:00:00-04 | 2000-04-30 18:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-04-01 00:00:00-05 | 2000-05-15 00:00:00-04 | 2000-04-08 00:00:00-04
+ 2000-05-31 20:00:00-04 | 2000-05-31 18:00:00-04 | 2000-06-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-04-08 00:00:00-04
+ 2000-05-31 20:00:00-04 | 2000-05-31 18:00:00-04 | 2000-06-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-06-08 00:00:00-04
+ 2000-05-31 20:00:00-04 | 2000-05-31 18:00:00-04 | 2000-06-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-06-08 00:00:00-04
+ 2000-06-30 20:00:00-04 | 2000-06-30 18:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-06-08 00:00:00-04
+ 2000-06-30 20:00:00-04 | 2000-06-30 18:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-06-08 00:00:00-04
+ 2000-06-30 20:00:00-04 | 2000-06-30 18:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-06-08 00:00:00-04
+ 2000-06-30 20:00:00-04 | 2000-06-30 18:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-06-08 00:00:00-04
+ 2000-07-31 20:00:00-04 | 2000-07-31 18:00:00-04 | 2000-08-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-08-08 00:00:00-04
+ 2000-07-31 20:00:00-04 | 2000-07-31 18:00:00-04 | 2000-08-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-08-08 00:00:00-04
+ 2000-07-31 20:00:00-04 | 2000-07-31 18:00:00-04 | 2000-08-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-08-08 00:00:00-04
+(31 rows)
+
+RESET datestyle;
+------------------------------------------------------------
+--- Test timescaledb_experimental.time_bucket_ng function --
+------------------------------------------------------------
+-- not supported functionality
+\set ON_ERROR_STOP 0
+SELECT timescaledb_experimental.time_bucket_ng('1 hour', '2001-02-03' :: date) AS result;
+ERROR: interval must be either days and weeks, or months and years
+SELECT timescaledb_experimental.time_bucket_ng('0 days', '2001-02-03' :: date) AS result;
+ERROR: interval must be at least one day
+SELECT timescaledb_experimental.time_bucket_ng('1 month', '2001-02-03' :: date, origin => '2000-01-02') AS result;
+ERROR: origin must be the first day of the month
+HINT: When using timestamptz-version of the function, 'origin' is converted to provided 'timezone'.
+SELECT timescaledb_experimental.time_bucket_ng('1 month', '2000-01-02' :: date, origin => '2001-01-01') AS result;
+ result
+------------
+ 01-01-2000
+(1 row)
+
+SELECT timescaledb_experimental.time_bucket_ng('1 day', '2000-01-02' :: date, origin => '2001-01-01') AS result;
+ERROR: origin must be before the given date
+SELECT timescaledb_experimental.time_bucket_ng('1 month 3 hours', '2021-11-22' :: timestamp) AS result;
+ERROR: interval can't combine months with minutes or hours
+-- timestamp is less than the default 'origin' value
+SELECT timescaledb_experimental.time_bucket_ng('1 day', '1999-01-01 12:34:56 MSK' :: timestamptz, timezone => 'MSK');
+ERROR: origin must be before the given date
+-- 'origin' in Europe/Moscow timezone is not the first day of the month at given time zone (UTC in this case)
+select timescaledb_experimental.time_bucket_ng('1 month', '2021-07-12 12:34:56 Europe/Moscow' :: timestamptz, origin => '2021-06-01 00:00:00 Europe/Moscow' :: timestamptz, timezone => 'UTC');
+ERROR: origin must be the first day of the month
+HINT: When using timestamptz-version of the function, 'origin' is converted to provided 'timezone'.
+\set ON_ERROR_STOP 1 +-- wrappers +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-11-22' :: timestamp) AS result; + result +-------------------------- + Fri Jan 01 00:00:00 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-11-22' :: timestamptz) AS result; + result +------------------------------ + Fri Jan 01 00:00:00 2021 EST +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-11-22' :: timestamp, origin => '2021-06-01') AS result; + result +-------------------------- + Tue Jun 01 00:00:00 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-11-22' :: timestamptz, origin => '2021-06-01') AS result; + result +------------------------------ + Tue Jun 01 00:00:00 2021 EDT +(1 row) + +-- null argument +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: date) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamp) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamptz) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamptz, timezone => 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: date, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamp, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamptz, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamptz, origin => '2021-06-01', timezone => 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +-- null interval +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12' :: date) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamp) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamptz) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamptz, 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12' :: date, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamp, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamptz, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamptz, origin => '2021-06-01', timezone => 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +-- null origin +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12' :: date, origin => null) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamp, origin => null) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, origin => null) AS result; + 
result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, origin => null, timezone => 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +-- infinity argument +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: date) AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamp) AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamptz) AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamptz, timezone => 'Europe/Moscow') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: date, origin => '2021-06-01') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamp, origin => '2021-06-01') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamptz, origin => '2021-06-01') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamptz, origin => '2021-06-01', timezone => 'Europe/Moscow') AS result; + result +---------- + infinity +(1 row) + +-- test for specific code path: hours/minutes/seconds interval and timestamp argument +SELECT timescaledb_experimental.time_bucket_ng('12 hours', 'infinity' :: timestamp) AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('12 hours', 'infinity' :: timestamp, origin => '2021-06-01') AS result; + result +---------- + infinity +(1 row) + +-- infinite origin +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12' :: date, origin => 'infinity') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamp, origin => 'infinity') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, origin => 'infinity') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, origin => 'infinity', timezone => 'Europe/Moscow') AS result; + result +---------- + infinity +(1 row) + +-- test for specific code path: hours/minutes/seconds interval and timestamp argument +SELECT timescaledb_experimental.time_bucket_ng('12 hours', '2021-07-12 12:34:56' :: timestamp, origin => 'infinity') AS result; + result +---------- + infinity +(1 row) + +-- test for invalid timezone argument +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, timezone => null) AS result; + result +-------- + +(1 row) + +\set ON_ERROR_STOP 0 +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, timezone => 'Europe/Ololondon') AS result; +ERROR: time zone "Europe/Ololondon" not recognized +\set ON_ERROR_STOP 1 +-- Make sure time_bucket_ng() supports seconds, minutes, and hours. +-- We happen to know that the internal implementation is the same +-- as for time_bucket(), thus there is no reason to execute all the tests +-- we already have for time_bucket(). 
These two functions will most likely +-- be merged eventually anyway. +SELECT timescaledb_experimental.time_bucket_ng('30 seconds', '2021-07-12 12:34:56' :: timestamp) AS result; + result +-------------------------- + Mon Jul 12 12:34:30 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('15 minutes', '2021-07-12 12:34:56' :: timestamp) AS result; + result +-------------------------- + Mon Jul 12 12:30:00 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('6 hours', '2021-07-12 12:34:56' :: timestamp) AS result; + result +-------------------------- + Mon Jul 12 12:00:00 2021 +(1 row) + +-- Same as above, but with provided 'origin' argument. +SELECT timescaledb_experimental.time_bucket_ng('30 seconds', '2021-07-12 12:34:56' :: timestamp, origin => '2021-07-12 12:10:00') AS result; + result +-------------------------- + Mon Jul 12 12:34:30 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('15 minutes', '2021-07-12 12:34:56' :: timestamp, origin => '2021-07-12 12:10:00') AS result; + result +-------------------------- + Mon Jul 12 12:25:00 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('6 hours', '2021-07-12 12:34:56' :: timestamp, origin => '2021-07-12 12:10:00') AS result; + result +-------------------------- + Mon Jul 12 12:10:00 2021 +(1 row) + +-- N days / weeks buckets +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 day', d), 'YYYY-MM-DD') AS d1, + to_char(timescaledb_experimental.time_bucket_ng('2 days', d), 'YYYY-MM-DD') AS d2, + to_char(timescaledb_experimental.time_bucket_ng('3 days', d), 'YYYY-MM-DD') AS d3, + to_char(timescaledb_experimental.time_bucket_ng('1 week', d), 'YYYY-MM-DD') AS w1, + to_char(timescaledb_experimental.time_bucket_ng('1 week 2 days', d), 'YYYY-MM-DD') AS w1d2 +FROM generate_series('2020-01-01' :: date, '2020-01-12', '1 day') AS ts, + unnest(array[ts :: date]) AS d; + d | d1 | d2 | d3 | w1 | w1d2 +------------+------------+------------+------------+------------+------------ + 2020-01-01 | 2020-01-01 | 2019-12-31 | 2020-01-01 | 2019-12-28 | 2019-12-26 + 2020-01-02 | 2020-01-02 | 2020-01-02 | 2020-01-01 | 2019-12-28 | 2019-12-26 + 2020-01-03 | 2020-01-03 | 2020-01-02 | 2020-01-01 | 2019-12-28 | 2019-12-26 + 2020-01-04 | 2020-01-04 | 2020-01-04 | 2020-01-04 | 2020-01-04 | 2020-01-04 + 2020-01-05 | 2020-01-05 | 2020-01-04 | 2020-01-04 | 2020-01-04 | 2020-01-04 + 2020-01-06 | 2020-01-06 | 2020-01-06 | 2020-01-04 | 2020-01-04 | 2020-01-04 + 2020-01-07 | 2020-01-07 | 2020-01-06 | 2020-01-07 | 2020-01-04 | 2020-01-04 + 2020-01-08 | 2020-01-08 | 2020-01-08 | 2020-01-07 | 2020-01-04 | 2020-01-04 + 2020-01-09 | 2020-01-09 | 2020-01-08 | 2020-01-07 | 2020-01-04 | 2020-01-04 + 2020-01-10 | 2020-01-10 | 2020-01-10 | 2020-01-10 | 2020-01-04 | 2020-01-04 + 2020-01-11 | 2020-01-11 | 2020-01-10 | 2020-01-10 | 2020-01-11 | 2020-01-04 + 2020-01-12 | 2020-01-12 | 2020-01-12 | 2020-01-10 | 2020-01-11 | 2020-01-04 +(12 rows) + +-- N days / weeks buckets with given 'origin' +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 day', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS d1, + to_char(timescaledb_experimental.time_bucket_ng('2 days', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS d2, + to_char(timescaledb_experimental.time_bucket_ng('3 days', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS d3, + to_char(timescaledb_experimental.time_bucket_ng('1 week', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS w1, + 
to_char(timescaledb_experimental.time_bucket_ng('1 week 2 days', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS w1d2 +FROM generate_series('2020-01-01' :: date, '2020-01-12', '1 day') AS ts, + unnest(array[ts :: date]) AS d; + d | d1 | d2 | d3 | w1 | w1d2 +------------+------------+------------+------------+------------+------------ + 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-01-02 | 2020-01-02 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-01-03 | 2020-01-03 | 2020-01-03 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-01-04 | 2020-01-04 | 2020-01-03 | 2020-01-04 | 2020-01-01 | 2020-01-01 + 2020-01-05 | 2020-01-05 | 2020-01-05 | 2020-01-04 | 2020-01-01 | 2020-01-01 + 2020-01-06 | 2020-01-06 | 2020-01-05 | 2020-01-04 | 2020-01-01 | 2020-01-01 + 2020-01-07 | 2020-01-07 | 2020-01-07 | 2020-01-07 | 2020-01-01 | 2020-01-01 + 2020-01-08 | 2020-01-08 | 2020-01-07 | 2020-01-07 | 2020-01-08 | 2020-01-01 + 2020-01-09 | 2020-01-09 | 2020-01-09 | 2020-01-07 | 2020-01-08 | 2020-01-01 + 2020-01-10 | 2020-01-10 | 2020-01-09 | 2020-01-10 | 2020-01-08 | 2020-01-10 + 2020-01-11 | 2020-01-11 | 2020-01-11 | 2020-01-10 | 2020-01-08 | 2020-01-10 + 2020-01-12 | 2020-01-12 | 2020-01-11 | 2020-01-10 | 2020-01-08 | 2020-01-10 +(12 rows) + +-- N month buckets +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 month', d), 'YYYY-MM-DD') AS m1, + to_char(timescaledb_experimental.time_bucket_ng('2 month', d), 'YYYY-MM-DD') AS m2, + to_char(timescaledb_experimental.time_bucket_ng('3 month', d), 'YYYY-MM-DD') AS m3, + to_char(timescaledb_experimental.time_bucket_ng('4 month', d), 'YYYY-MM-DD') AS m4, + to_char(timescaledb_experimental.time_bucket_ng('5 month', d), 'YYYY-MM-DD') AS m5 +FROM generate_series('2020-01-01' :: date, '2020-12-01', '1 month') AS ts, + unnest(array[ts :: date]) AS d; + d | m1 | m2 | m3 | m4 | m5 +------------+------------+------------+------------+------------+------------ + 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-02-01 | 2020-02-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-03-01 | 2020-03-01 | 2020-03-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-04-01 | 2020-04-01 | 2020-03-01 | 2020-04-01 | 2020-01-01 | 2020-01-01 + 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-04-01 | 2020-05-01 | 2020-01-01 + 2020-06-01 | 2020-06-01 | 2020-05-01 | 2020-04-01 | 2020-05-01 | 2020-06-01 + 2020-07-01 | 2020-07-01 | 2020-07-01 | 2020-07-01 | 2020-05-01 | 2020-06-01 + 2020-08-01 | 2020-08-01 | 2020-07-01 | 2020-07-01 | 2020-05-01 | 2020-06-01 + 2020-09-01 | 2020-09-01 | 2020-09-01 | 2020-07-01 | 2020-09-01 | 2020-06-01 + 2020-10-01 | 2020-10-01 | 2020-09-01 | 2020-10-01 | 2020-09-01 | 2020-06-01 + 2020-11-01 | 2020-11-01 | 2020-11-01 | 2020-10-01 | 2020-09-01 | 2020-11-01 + 2020-12-01 | 2020-12-01 | 2020-11-01 | 2020-10-01 | 2020-09-01 | 2020-11-01 +(12 rows) + +-- N month buckets with given 'origin' +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 month', d, origin => '2019-05-01'), 'YYYY-MM-DD') AS m1, + to_char(timescaledb_experimental.time_bucket_ng('2 month', d, origin => '2019-05-01'), 'YYYY-MM-DD') AS m2, + to_char(timescaledb_experimental.time_bucket_ng('3 month', d, origin => '2019-05-01'), 'YYYY-MM-DD') AS m3, + to_char(timescaledb_experimental.time_bucket_ng('4 month', d, origin => '2019-05-01'), 'YYYY-MM-DD') AS m4, + to_char(timescaledb_experimental.time_bucket_ng('5 month', d, origin => '2019-05-01'), 
'YYYY-MM-DD') AS m5 +FROM generate_series('2020-01-01' :: date, '2020-12-01', '1 month') AS ts, + unnest(array[ts :: date]) AS d; + d | m1 | m2 | m3 | m4 | m5 +------------+------------+------------+------------+------------+------------ + 2020-01-01 | 2020-01-01 | 2020-01-01 | 2019-11-01 | 2020-01-01 | 2019-10-01 + 2020-02-01 | 2020-02-01 | 2020-01-01 | 2020-02-01 | 2020-01-01 | 2019-10-01 + 2020-03-01 | 2020-03-01 | 2020-03-01 | 2020-02-01 | 2020-01-01 | 2020-03-01 + 2020-04-01 | 2020-04-01 | 2020-03-01 | 2020-02-01 | 2020-01-01 | 2020-03-01 + 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-03-01 + 2020-06-01 | 2020-06-01 | 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-03-01 + 2020-07-01 | 2020-07-01 | 2020-07-01 | 2020-05-01 | 2020-05-01 | 2020-03-01 + 2020-08-01 | 2020-08-01 | 2020-07-01 | 2020-08-01 | 2020-05-01 | 2020-08-01 + 2020-09-01 | 2020-09-01 | 2020-09-01 | 2020-08-01 | 2020-09-01 | 2020-08-01 + 2020-10-01 | 2020-10-01 | 2020-09-01 | 2020-08-01 | 2020-09-01 | 2020-08-01 + 2020-11-01 | 2020-11-01 | 2020-11-01 | 2020-11-01 | 2020-09-01 | 2020-08-01 + 2020-12-01 | 2020-12-01 | 2020-11-01 | 2020-11-01 | 2020-09-01 | 2020-08-01 +(12 rows) + +-- N years / N years, M month buckets +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 year', d), 'YYYY-MM-DD') AS y1, + to_char(timescaledb_experimental.time_bucket_ng('1 year 6 month', d), 'YYYY-MM-DD') AS y1m6, + to_char(timescaledb_experimental.time_bucket_ng('2 years', d), 'YYYY-MM-DD') AS y2, + to_char(timescaledb_experimental.time_bucket_ng('2 years 6 month', d), 'YYYY-MM-DD') AS y2m6, + to_char(timescaledb_experimental.time_bucket_ng('3 years', d), 'YYYY-MM-DD') AS y3 +FROM generate_series('2015-01-01' :: date, '2020-12-01', '6 month') AS ts, + unnest(array[ts :: date]) AS d; + d | y1 | y1m6 | y2 | y2m6 | y3 +------------+------------+------------+------------+------------+------------ + 2015-01-01 | 2015-01-01 | 2015-01-01 | 2014-01-01 | 2015-01-01 | 2015-01-01 + 2015-07-01 | 2015-01-01 | 2015-01-01 | 2014-01-01 | 2015-01-01 | 2015-01-01 + 2016-01-01 | 2016-01-01 | 2015-01-01 | 2016-01-01 | 2015-01-01 | 2015-01-01 + 2016-07-01 | 2016-01-01 | 2016-07-01 | 2016-01-01 | 2015-01-01 | 2015-01-01 + 2017-01-01 | 2017-01-01 | 2016-07-01 | 2016-01-01 | 2015-01-01 | 2015-01-01 + 2017-07-01 | 2017-01-01 | 2016-07-01 | 2016-01-01 | 2017-07-01 | 2015-01-01 + 2018-01-01 | 2018-01-01 | 2018-01-01 | 2018-01-01 | 2017-07-01 | 2018-01-01 + 2018-07-01 | 2018-01-01 | 2018-01-01 | 2018-01-01 | 2017-07-01 | 2018-01-01 + 2019-01-01 | 2019-01-01 | 2018-01-01 | 2018-01-01 | 2017-07-01 | 2018-01-01 + 2019-07-01 | 2019-01-01 | 2019-07-01 | 2018-01-01 | 2017-07-01 | 2018-01-01 + 2020-01-01 | 2020-01-01 | 2019-07-01 | 2020-01-01 | 2020-01-01 | 2018-01-01 + 2020-07-01 | 2020-01-01 | 2019-07-01 | 2020-01-01 | 2020-01-01 | 2018-01-01 +(12 rows) + +-- N years / N years, M month buckets with given 'origin' +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 year', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y1, + to_char(timescaledb_experimental.time_bucket_ng('1 year 6 month', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y1m6, + to_char(timescaledb_experimental.time_bucket_ng('2 years', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y2, + to_char(timescaledb_experimental.time_bucket_ng('2 years 6 month', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y2m6, + to_char(timescaledb_experimental.time_bucket_ng('3 years', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y3 +FROM 
generate_series('2015-01-01' :: date, '2020-12-01', '6 month') AS ts, + unnest(array[ts :: date]) AS d; + d | y1 | y1m6 | y2 | y2m6 | y3 +------------+------------+------------+------------+------------+------------ + 2015-01-01 | 2014-06-01 | 2013-12-01 | 2014-06-01 | 2012-12-01 | 2012-06-01 + 2015-07-01 | 2015-06-01 | 2015-06-01 | 2014-06-01 | 2015-06-01 | 2015-06-01 + 2016-01-01 | 2015-06-01 | 2015-06-01 | 2014-06-01 | 2015-06-01 | 2015-06-01 + 2016-07-01 | 2016-06-01 | 2015-06-01 | 2016-06-01 | 2015-06-01 | 2015-06-01 + 2017-01-01 | 2016-06-01 | 2016-12-01 | 2016-06-01 | 2015-06-01 | 2015-06-01 + 2017-07-01 | 2017-06-01 | 2016-12-01 | 2016-06-01 | 2015-06-01 | 2015-06-01 + 2018-01-01 | 2017-06-01 | 2016-12-01 | 2016-06-01 | 2017-12-01 | 2015-06-01 + 2018-07-01 | 2018-06-01 | 2018-06-01 | 2018-06-01 | 2017-12-01 | 2018-06-01 + 2019-01-01 | 2018-06-01 | 2018-06-01 | 2018-06-01 | 2017-12-01 | 2018-06-01 + 2019-07-01 | 2019-06-01 | 2018-06-01 | 2018-06-01 | 2017-12-01 | 2018-06-01 + 2020-01-01 | 2019-06-01 | 2019-12-01 | 2018-06-01 | 2017-12-01 | 2018-06-01 + 2020-07-01 | 2020-06-01 | 2019-12-01 | 2020-06-01 | 2020-06-01 | 2018-06-01 +(12 rows) + +-- Test timezones support with different bucket sizes +BEGIN; +-- Timestamptz type is displayed in the session timezone. +-- To get consistent results during the test we temporary set the session +-- timezone to the known one. +SET TIME ZONE '+00'; +-- Moscow is UTC+3 in the year 2021. Let's say you are dealing with '1 day' bucket. +-- In order to calculate the beginning of the bucket you have to take LOCAL +-- Moscow time and throw away the time. You will get the midnight. The new day +-- starts 3 hours EARLIER in Moscow than in UTC+0 time zone, thus resulting +-- timestamp will be 3 hours LESS than for UTC+0. 
+SELECT bs, tz, to_char(ts_out, 'YYYY-MM-DD HH24:MI:SS TZ') as res +FROM unnest(array['Europe/Moscow', 'UTC']) as tz, + unnest(array['12 hours', '1 day', '1 month', '4 months', '1 year']) as bs, + unnest(array['2021-07-12 12:34:56 Europe/Moscow' :: timestamptz]) as ts_in, + unnest(array[timescaledb_experimental.time_bucket_ng(bs :: interval, ts_in, timezone => tz)]) as ts_out +ORDER BY tz, bs :: interval; + bs | tz | res +----------+---------------+------------------------- + 12 hours | Europe/Moscow | 2021-07-12 09:00:00 +00 + 1 day | Europe/Moscow | 2021-07-11 21:00:00 +00 + 1 month | Europe/Moscow | 2021-06-30 21:00:00 +00 + 4 months | Europe/Moscow | 2021-04-30 21:00:00 +00 + 1 year | Europe/Moscow | 2020-12-31 21:00:00 +00 + 12 hours | UTC | 2021-07-12 00:00:00 +00 + 1 day | UTC | 2021-07-12 00:00:00 +00 + 1 month | UTC | 2021-07-01 00:00:00 +00 + 4 months | UTC | 2021-05-01 00:00:00 +00 + 1 year | UTC | 2021-01-01 00:00:00 +00 +(10 rows) + +-- Same as above, but with 'origin' +SELECT bs, tz, to_char(ts_out, 'YYYY-MM-DD HH24:MI:SS TZ') as res +FROM unnest(array['Europe/Moscow']) as tz, + unnest(array['12 hours', '1 day', '1 month', '4 months', '1 year']) as bs, + unnest(array['2021-07-12 12:34:56 Europe/Moscow' :: timestamptz]) as ts_in, + unnest(array['2021-06-01 00:00:00 Europe/Moscow' :: timestamptz]) as origin_in, + unnest(array[timescaledb_experimental.time_bucket_ng(bs :: interval, ts_in, origin => origin_in, timezone => tz)]) as ts_out +ORDER BY tz, bs :: interval; + bs | tz | res +----------+---------------+------------------------- + 12 hours | Europe/Moscow | 2021-07-12 09:00:00 +00 + 1 day | Europe/Moscow | 2021-07-11 21:00:00 +00 + 1 month | Europe/Moscow | 2021-06-30 21:00:00 +00 + 4 months | Europe/Moscow | 2021-05-31 21:00:00 +00 + 1 year | Europe/Moscow | 2021-05-31 21:00:00 +00 +(5 rows) + +-- Overwritten origin allows to work with dates earlier than the default origin +SELECT to_char(timescaledb_experimental.time_bucket_ng('1 day', '1999-01-01 12:34:56 MSK' :: timestamptz, origin => '1900-01-01 00:00:00 MSK', timezone => 'MSK'), 'YYYY-MM-DD HH24:MI:SS TZ'); + to_char +------------------------- + 1998-12-31 21:00:00 +00 +(1 row) + +-- Restore previously used time zone. +ROLLBACK; +------------------------------------- +--- Test time input functions -- +------------------------------------- +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE OR REPLACE FUNCTION test.interval_to_internal(coltype REGTYPE, value ANYELEMENT = NULL::BIGINT) RETURNS BIGINT +AS :MODULE_PATHNAME, 'ts_dimension_interval_to_internal_test' LANGUAGE C VOLATILE; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT test.interval_to_internal('TIMESTAMP'::regtype, INTERVAL '1 day'); + interval_to_internal +---------------------- + 86400000000 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMP'::regtype, 86400000000); + interval_to_internal +---------------------- + 86400000000 +(1 row) + +---should give warning +SELECT test.interval_to_internal('TIMESTAMP'::regtype, 86400); +WARNING: unexpected interval: smaller than one second +HINT: The interval is specified in microseconds. 
+ interval_to_internal +---------------------- + 86400 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMP'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +SELECT test.interval_to_internal('BIGINT'::regtype, 2147483649::bigint); + interval_to_internal +---------------------- + 2147483649 +(1 row) + +-- Default interval for integer is supported as part of +-- hypertable generalization +SELECT test.interval_to_internal('INT'::regtype); + interval_to_internal +---------------------- + 100000 +(1 row) + +SELECT test.interval_to_internal('SMALLINT'::regtype); + interval_to_internal +---------------------- + 10000 +(1 row) + +SELECT test.interval_to_internal('BIGINT'::regtype); + interval_to_internal +---------------------- + 1000000 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMPTZ'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMP'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +SELECT test.interval_to_internal('DATE'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +\set VERBOSITY terse +\set ON_ERROR_STOP 0 +SELECT test.interval_to_internal('INT'::regtype, 2147483649::bigint); +ERROR: invalid interval: must be between 1 and 2147483647 +SELECT test.interval_to_internal('SMALLINT'::regtype, 32768::bigint); +ERROR: invalid interval: must be between 1 and 32767 +SELECT test.interval_to_internal('TEXT'::regtype, 32768::bigint); +ERROR: invalid type for dimension "testcol" +SELECT test.interval_to_internal('INT'::regtype, INTERVAL '1 day'); +ERROR: invalid interval type for integer dimension +\set ON_ERROR_STOP 1 diff --git a/test/expected/timestamp-15.out b/test/expected/timestamp-15.out new file mode 100644 index 00000000000..91ad38f2d88 --- /dev/null +++ b/test/expected/timestamp-15.out @@ -0,0 +1,2057 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- Utility function for grouping/slotting time with a given interval. 
+CREATE OR REPLACE FUNCTION date_group( + field timestamp, + group_interval interval +) + RETURNS timestamp LANGUAGE SQL STABLE AS +$BODY$ + SELECT to_timestamp((EXTRACT(EPOCH from $1)::int / + EXTRACT(EPOCH from group_interval)::int) * + EXTRACT(EPOCH from group_interval)::int)::timestamp; +$BODY$; +CREATE TABLE PUBLIC."testNs" ( + "timeCustom" TIMESTAMP NOT NULL, + device_id TEXT NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL, + series_bool BOOLEAN NULL +); +CREATE INDEX ON PUBLIC."testNs" (device_id, "timeCustom" DESC NULLS LAST) WHERE device_id IS NOT NULL; +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE SCHEMA "testNs" AUTHORIZATION :ROLE_DEFAULT_PERM_USER; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT * FROM create_hypertable('"public"."testNs"', 'timeCustom', 'device_id', 2, associated_schema_name=>'testNs' ); +WARNING: column type "timestamp without time zone" used for "timeCustom" does not follow best practices + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 1 | public | testNs | t +(1 row) + +\c :TEST_DBNAME +INSERT INTO PUBLIC."testNs"("timeCustom", device_id, series_0, series_1) VALUES +('2009-11-12T01:00:00+00:00', 'dev1', 1.5, 1), +('2009-11-12T01:00:00+00:00', 'dev1', 1.5, 2), +('2009-11-10T23:00:02+00:00', 'dev1', 2.5, 3); +INSERT INTO PUBLIC."testNs"("timeCustom", device_id, series_0, series_1) VALUES +('2009-11-10T23:00:00+00:00', 'dev2', 1.5, 1), +('2009-11-10T23:00:00+00:00', 'dev2', 1.5, 2); +SELECT * FROM PUBLIC."testNs"; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +--------------------------+-----------+----------+----------+----------+------------- + Thu Nov 12 01:00:00 2009 | dev1 | 1.5 | 1 | | + Thu Nov 12 01:00:00 2009 | dev1 | 1.5 | 2 | | + Tue Nov 10 23:00:02 2009 | dev1 | 2.5 | 3 | | + Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 1 | | + Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 2 | | +(5 rows) + +SET client_min_messages = WARNING; +\echo 'The next 2 queries will differ in output between UTC and EST since the mod is on the 100th hour UTC' +The next 2 queries will differ in output between UTC and EST since the mod is on the 100th hour UTC +SET timezone = 'UTC'; +SELECT date_group("timeCustom", '100 days') AS time, sum(series_0) +FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC; + time | sum +--------------------------+----- + Sun Sep 13 00:00:00 2009 | 8.5 +(1 row) + +SET timezone = 'EST'; +SELECT date_group("timeCustom", '100 days') AS time, sum(series_0) +FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC; + time | sum +--------------------------+----- + Sat Sep 12 19:00:00 2009 | 8.5 +(1 row) + +\echo 'The rest of the queries will be the same in output between UTC and EST' +The rest of the queries will be the same in output between UTC and EST +SET timezone = 'UTC'; +SELECT date_group("timeCustom", '1 day') AS time, sum(series_0) +FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC; + time | sum +--------------------------+----- + Tue Nov 10 00:00:00 2009 | 5.5 + Thu Nov 12 00:00:00 2009 | 3 +(2 rows) + +SET timezone = 'EST'; +SELECT date_group("timeCustom", '1 day') AS time, sum(series_0) +FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC; + time | sum +--------------------------+----- + Mon Nov 09 19:00:00 2009 | 5.5 + Wed Nov 11 19:00:00 2009 | 3 +(2 rows) + +SET timezone = 'UTC'; +SELECT * +FROM PUBLIC."testNs" +WHERE "timeCustom" >= TIMESTAMP '2009-11-10T23:00:00' +AND "timeCustom" < TIMESTAMP 
'2009-11-12T01:00:00' ORDER BY "timeCustom" DESC, device_id, series_1; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +--------------------------+-----------+----------+----------+----------+------------- + Tue Nov 10 23:00:02 2009 | dev1 | 2.5 | 3 | | + Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 1 | | + Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 2 | | +(3 rows) + +SET timezone = 'EST'; +SELECT * +FROM PUBLIC."testNs" +WHERE "timeCustom" >= TIMESTAMP '2009-11-10T23:00:00' +AND "timeCustom" < TIMESTAMP '2009-11-12T01:00:00' ORDER BY "timeCustom" DESC, device_id, series_1; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +--------------------------+-----------+----------+----------+----------+------------- + Tue Nov 10 23:00:02 2009 | dev1 | 2.5 | 3 | | + Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 1 | | + Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 2 | | +(3 rows) + +SET timezone = 'UTC'; +SELECT date_group("timeCustom", '1 day') AS time, sum(series_0) +FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC LIMIT 2; + time | sum +--------------------------+----- + Tue Nov 10 00:00:00 2009 | 5.5 + Thu Nov 12 00:00:00 2009 | 3 +(2 rows) + +SET timezone = 'EST'; +SELECT date_group("timeCustom", '1 day') AS time, sum(series_0) +FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC LIMIT 2; + time | sum +--------------------------+----- + Mon Nov 09 19:00:00 2009 | 5.5 + Wed Nov 11 19:00:00 2009 | 3 +(2 rows) + +------------------------------------ +-- Test time conversion functions -- +------------------------------------ +\set ON_ERROR_STOP 0 +SET timezone = 'UTC'; +-- Conversion to timestamp using Postgres built-in function taking +-- double. Gives inaccurate result on Postgres <= 9.6.2. Accurate on +-- Postgres >= 9.6.3. +SELECT to_timestamp(1486480176.236538); + to_timestamp +------------------------------------- + Tue Feb 07 15:09:36.236538 2017 UTC +(1 row) + +-- extension-specific version taking microsecond UNIX timestamp +SELECT _timescaledb_functions.to_timestamp(1486480176236538); + to_timestamp +------------------------------------- + Tue Feb 07 15:09:36.236538 2017 UTC +(1 row) + +-- Should be the inverse of the statement above. +SELECT _timescaledb_functions.to_unix_microseconds('2017-02-07 15:09:36.236538+00'); + to_unix_microseconds +---------------------- + 1486480176236538 +(1 row) + +-- For timestamps, BIGINT MAX represents +Infinity and BIGINT MIN +-- -Infinity. We keep this notion for UNIX epoch time: +SELECT _timescaledb_functions.to_unix_microseconds('+infinity'); +ERROR: invalid input syntax for type timestamp with time zone: "+infinity" at character 52 +SELECT _timescaledb_functions.to_timestamp(9223372036854775807); + to_timestamp +-------------- + infinity +(1 row) + +SELECT _timescaledb_functions.to_unix_microseconds('-infinity'); + to_unix_microseconds +---------------------- + -9223372036854775808 +(1 row) + +SELECT _timescaledb_functions.to_timestamp(-9223372036854775808); + to_timestamp +-------------- + -infinity +(1 row) + +-- In UNIX microseconds, the largest bigint value below infinity +-- (BIGINT MAX) is smaller than internal date upper bound and should +-- therefore be OK. 
Further, converting to the internal postgres epoch +-- cannot overflow a 64-bit INTEGER since the postgres epoch is at a +-- later date compared to the UNIX epoch, and is therefore represented +-- by a smaller number +SELECT _timescaledb_functions.to_timestamp(9223372036854775806); + to_timestamp +--------------------------------------- + Sun Jan 10 04:00:54.775806 294247 UTC +(1 row) + +-- Julian day zero is -210866803200000000 microseconds from UNIX epoch +SELECT _timescaledb_functions.to_timestamp(-210866803200000000); + to_timestamp +--------------------------------- + Mon Nov 24 00:00:00 4714 UTC BC +(1 row) + +\set VERBOSITY default +-- Going beyond Julian day zero should give out-of-range error +SELECT _timescaledb_functions.to_timestamp(-210866803200000001); +ERROR: timestamp out of range +-- Lower bound on date (should return the Julian day zero UNIX timestamp above) +SELECT _timescaledb_functions.to_unix_microseconds('4714-11-24 00:00:00+00 BC'); + to_unix_microseconds +---------------------- + -210866803200000000 +(1 row) + +-- Going beyond lower bound on date should return out-of-range +SELECT _timescaledb_functions.to_unix_microseconds('4714-11-23 23:59:59.999999+00 BC'); +ERROR: timestamp out of range: "4714-11-23 23:59:59.999999+00 BC" +LINE 1: ...ELECT _timescaledb_functions.to_unix_microseconds('4714-11-2... + ^ +-- The upper bound for Postgres TIMESTAMPTZ +SELECT timestamp '294276-12-31 23:59:59.999999+00'; + timestamp +----------------------------------- + Sun Dec 31 23:59:59.999999 294276 +(1 row) + +-- Going beyond the upper bound, should fail +SELECT timestamp '294276-12-31 23:59:59.999999+00' + interval '1 us'; +ERROR: timestamp out of range +-- Cannot represent the upper bound timestamp with a UNIX microsecond timestamp +-- since the Postgres epoch is at a later date than the UNIX epoch. +SELECT _timescaledb_functions.to_unix_microseconds('294276-12-31 23:59:59.999999+00'); +ERROR: timestamp out of range +-- Subtracting the difference between the two epochs (10957 days) should bring +-- us within range. +SELECT timestamp '294276-12-31 23:59:59.999999+00' - interval '10957 days'; + ?column? +----------------------------------- + Fri Jan 01 23:59:59.999999 294247 +(1 row) + +SELECT _timescaledb_functions.to_unix_microseconds('294247-01-01 23:59:59.999999'); + to_unix_microseconds +---------------------- + 9223371331199999999 +(1 row) + +-- Adding one microsecond should take us out-of-range again +SELECT timestamp '294247-01-01 23:59:59.999999' + interval '1 us'; + ?column? 
+---------------------------- + Sat Jan 02 00:00:00 294247 +(1 row) + +SELECT _timescaledb_functions.to_unix_microseconds(timestamp '294247-01-01 23:59:59.999999' + interval '1 us'); +ERROR: timestamp out of range +--no time_bucketing of dates not by integer # of days +SELECT time_bucket('1 hour', DATE '2012-01-01'); +ERROR: interval must not have sub-day precision +SELECT time_bucket('25 hour', DATE '2012-01-01'); +ERROR: interval must be a multiple of a day +\set ON_ERROR_STOP 1 +SELECT time_bucket(INTERVAL '1 day', TIMESTAMP '2011-01-02 01:01:01'); + time_bucket +-------------------------- + Sun Jan 02 00:00:00 2011 +(1 row) + +SELECT time, time_bucket(INTERVAL '2 day ', time) +FROM unnest(ARRAY[ + TIMESTAMP '2011-01-01 01:01:01', + TIMESTAMP '2011-01-02 01:01:01', + TIMESTAMP '2011-01-03 01:01:01', + TIMESTAMP '2011-01-04 01:01:01' + ]) AS time; + time | time_bucket +--------------------------+-------------------------- + Sat Jan 01 01:01:01 2011 | Sat Jan 01 00:00:00 2011 + Sun Jan 02 01:01:01 2011 | Sat Jan 01 00:00:00 2011 + Mon Jan 03 01:01:01 2011 | Mon Jan 03 00:00:00 2011 + Tue Jan 04 01:01:01 2011 | Mon Jan 03 00:00:00 2011 +(4 rows) + +SELECT int_def, time_bucket(int_def,TIMESTAMP '2011-01-02 01:01:01.111') +FROM unnest(ARRAY[ + INTERVAL '1 millisecond', + INTERVAL '1 second', + INTERVAL '1 minute', + INTERVAL '1 hour', + INTERVAL '1 day', + INTERVAL '2 millisecond', + INTERVAL '2 second', + INTERVAL '2 minute', + INTERVAL '2 hour', + INTERVAL '2 day' + ]) AS int_def; + int_def | time_bucket +--------------+------------------------------ + @ 0.001 secs | Sun Jan 02 01:01:01.111 2011 + @ 1 sec | Sun Jan 02 01:01:01 2011 + @ 1 min | Sun Jan 02 01:01:00 2011 + @ 1 hour | Sun Jan 02 01:00:00 2011 + @ 1 day | Sun Jan 02 00:00:00 2011 + @ 0.002 secs | Sun Jan 02 01:01:01.11 2011 + @ 2 secs | Sun Jan 02 01:01:00 2011 + @ 2 mins | Sun Jan 02 01:00:00 2011 + @ 2 hours | Sun Jan 02 00:00:00 2011 + @ 2 days | Sat Jan 01 00:00:00 2011 +(10 rows) + +\set ON_ERROR_STOP 0 +SELECT time_bucket(INTERVAL '1 year 1d',TIMESTAMP '2011-01-02 01:01:01.111'); +ERROR: month intervals cannot have day or time component +SELECT time_bucket(INTERVAL '1 month 1 minute',TIMESTAMP '2011-01-02 01:01:01.111'); +ERROR: month intervals cannot have day or time component +\set ON_ERROR_STOP 1 +SELECT time, time_bucket(INTERVAL '5 minute', time) +FROM unnest(ARRAY[ + TIMESTAMP '1970-01-01 00:59:59.999999', + TIMESTAMP '1970-01-01 01:01:00', + TIMESTAMP '1970-01-01 01:04:59.999999', + TIMESTAMP '1970-01-01 01:05:00' + ]) AS time; + time | time_bucket +---------------------------------+-------------------------- + Thu Jan 01 00:59:59.999999 1970 | Thu Jan 01 00:55:00 1970 + Thu Jan 01 01:01:00 1970 | Thu Jan 01 01:00:00 1970 + Thu Jan 01 01:04:59.999999 1970 | Thu Jan 01 01:00:00 1970 + Thu Jan 01 01:05:00 1970 | Thu Jan 01 01:05:00 1970 +(4 rows) + +SELECT time, time_bucket(INTERVAL '5 minute', time) +FROM unnest(ARRAY[ + TIMESTAMP '2011-01-02 01:04:59.999999', + TIMESTAMP '2011-01-02 01:05:00', + TIMESTAMP '2011-01-02 01:09:59.999999', + TIMESTAMP '2011-01-02 01:10:00' + ]) AS time; + time | time_bucket +---------------------------------+-------------------------- + Sun Jan 02 01:04:59.999999 2011 | Sun Jan 02 01:00:00 2011 + Sun Jan 02 01:05:00 2011 | Sun Jan 02 01:05:00 2011 + Sun Jan 02 01:09:59.999999 2011 | Sun Jan 02 01:05:00 2011 + Sun Jan 02 01:10:00 2011 | Sun Jan 02 01:10:00 2011 +(4 rows) + +--offset with interval +SELECT time, time_bucket(INTERVAL '5 minute', time , INTERVAL '2 minutes') +FROM 
unnest(ARRAY[ + TIMESTAMP '2011-01-02 01:01:59.999999', + TIMESTAMP '2011-01-02 01:02:00', + TIMESTAMP '2011-01-02 01:06:59.999999', + TIMESTAMP '2011-01-02 01:07:00' + ]) AS time; + time | time_bucket +---------------------------------+-------------------------- + Sun Jan 02 01:01:59.999999 2011 | Sun Jan 02 00:57:00 2011 + Sun Jan 02 01:02:00 2011 | Sun Jan 02 01:02:00 2011 + Sun Jan 02 01:06:59.999999 2011 | Sun Jan 02 01:02:00 2011 + Sun Jan 02 01:07:00 2011 | Sun Jan 02 01:07:00 2011 +(4 rows) + +SELECT time, time_bucket(INTERVAL '5 minute', time , - INTERVAL '2 minutes') +FROM unnest(ARRAY[ + TIMESTAMP '2011-01-02 01:02:59.999999', + TIMESTAMP '2011-01-02 01:03:00', + TIMESTAMP '2011-01-02 01:07:59.999999', + TIMESTAMP '2011-01-02 01:08:00' + ]) AS time; + time | time_bucket +---------------------------------+-------------------------- + Sun Jan 02 01:02:59.999999 2011 | Sun Jan 02 00:58:00 2011 + Sun Jan 02 01:03:00 2011 | Sun Jan 02 01:03:00 2011 + Sun Jan 02 01:07:59.999999 2011 | Sun Jan 02 01:03:00 2011 + Sun Jan 02 01:08:00 2011 | Sun Jan 02 01:08:00 2011 +(4 rows) + +--offset with infinity +-- timestamp +SELECT time, time_bucket(INTERVAL '1 week', time, INTERVAL '1 day') +FROM unnest(ARRAY[ + timestamp '-Infinity', + timestamp 'Infinity' + ]) AS time; + time | time_bucket +-----------+------------- + -infinity | -infinity + infinity | infinity +(2 rows) + +-- timestamptz +SELECT time, time_bucket(INTERVAL '1 week', time, INTERVAL '1 day') +FROM unnest(ARRAY[ + timestamp with time zone '-Infinity', + timestamp with time zone 'Infinity' + ]) AS time; + time | time_bucket +-----------+------------- + -infinity | -infinity + infinity | infinity +(2 rows) + +-- Date +SELECT date, time_bucket(INTERVAL '1 week', date, INTERVAL '1 day') +FROM unnest(ARRAY[ + date '-Infinity', + date 'Infinity' + ]) AS date; + date | time_bucket +-----------+------------- + -infinity | -infinity + infinity | infinity +(2 rows) + +--example to align with an origin +SELECT time, time_bucket(INTERVAL '5 minute', time - (TIMESTAMP '2011-01-02 00:02:00' - TIMESTAMP 'epoch')) + (TIMESTAMP '2011-01-02 00:02:00'-TIMESTAMP 'epoch') +FROM unnest(ARRAY[ + TIMESTAMP '2011-01-02 01:01:59.999999', + TIMESTAMP '2011-01-02 01:02:00', + TIMESTAMP '2011-01-02 01:06:59.999999', + TIMESTAMP '2011-01-02 01:07:00' + ]) AS time; + time | ?column? +---------------------------------+-------------------------- + Sun Jan 02 01:01:59.999999 2011 | Sun Jan 02 00:57:00 2011 + Sun Jan 02 01:02:00 2011 | Sun Jan 02 01:02:00 2011 + Sun Jan 02 01:06:59.999999 2011 | Sun Jan 02 01:02:00 2011 + Sun Jan 02 01:07:00 2011 | Sun Jan 02 01:07:00 2011 +(4 rows) + +--rounding version +SELECT time, time_bucket(INTERVAL '5 minute', time , - INTERVAL '2.5 minutes') + INTERVAL '2 minutes 30 seconds' +FROM unnest(ARRAY[ + TIMESTAMP '2011-01-02 01:05:01', + TIMESTAMP '2011-01-02 01:07:29', + TIMESTAMP '2011-01-02 01:02:30', + TIMESTAMP '2011-01-02 01:07:30', + TIMESTAMP '2011-01-02 01:02:29' + ]) AS time; + time | ?column? 
+--------------------------+-------------------------- + Sun Jan 02 01:05:01 2011 | Sun Jan 02 01:05:00 2011 + Sun Jan 02 01:07:29 2011 | Sun Jan 02 01:05:00 2011 + Sun Jan 02 01:02:30 2011 | Sun Jan 02 01:05:00 2011 + Sun Jan 02 01:07:30 2011 | Sun Jan 02 01:10:00 2011 + Sun Jan 02 01:02:29 2011 | Sun Jan 02 01:00:00 2011 +(5 rows) + +--time_bucket with timezone should mimick date_trunc +SET timezone TO 'UTC'; +SELECT time, time_bucket(INTERVAL '1 hour', time), date_trunc('hour', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01', + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+01', + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+02' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+------------------------------+------------------------------ + Sun Jan 02 01:01:01 2011 UTC | Sun Jan 02 01:00:00 2011 UTC | Sun Jan 02 01:00:00 2011 UTC + Sun Jan 02 00:01:01 2011 UTC | Sun Jan 02 00:00:00 2011 UTC | Sun Jan 02 00:00:00 2011 UTC + Sat Jan 01 23:01:01 2011 UTC | Sat Jan 01 23:00:00 2011 UTC | Sat Jan 01 23:00:00 2011 UTC +(3 rows) + +SELECT time, time_bucket(INTERVAL '1 day', time), date_trunc('day', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01', + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+01', + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+02' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+------------------------------+------------------------------ + Sun Jan 02 01:01:01 2011 UTC | Sun Jan 02 00:00:00 2011 UTC | Sun Jan 02 00:00:00 2011 UTC + Sun Jan 02 00:01:01 2011 UTC | Sun Jan 02 00:00:00 2011 UTC | Sun Jan 02 00:00:00 2011 UTC + Sat Jan 01 23:01:01 2011 UTC | Sat Jan 01 00:00:00 2011 UTC | Sat Jan 01 00:00:00 2011 UTC +(3 rows) + +--what happens with a local tz +SET timezone TO 'America/New_York'; +SELECT time, time_bucket(INTERVAL '1 hour', time), date_trunc('hour', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01', + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+01', + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+02' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+------------------------------+------------------------------ + Sun Jan 02 01:01:01 2011 EST | Sun Jan 02 01:00:00 2011 EST | Sun Jan 02 01:00:00 2011 EST + Sat Jan 01 19:01:01 2011 EST | Sat Jan 01 19:00:00 2011 EST | Sat Jan 01 19:00:00 2011 EST + Sat Jan 01 18:01:01 2011 EST | Sat Jan 01 18:00:00 2011 EST | Sat Jan 01 18:00:00 2011 EST +(3 rows) + +--Note the timestamp tz input is aligned with UTC day /not/ local day. different than date_trunc. +SELECT time, time_bucket(INTERVAL '1 day', time), date_trunc('day', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01', + TIMESTAMP WITH TIME ZONE '2011-01-03 01:01:01+01', + TIMESTAMP WITH TIME ZONE '2011-01-04 01:01:01+02' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+------------------------------+------------------------------ + Sun Jan 02 01:01:01 2011 EST | Sat Jan 01 19:00:00 2011 EST | Sun Jan 02 00:00:00 2011 EST + Sun Jan 02 19:01:01 2011 EST | Sun Jan 02 19:00:00 2011 EST | Sun Jan 02 00:00:00 2011 EST + Mon Jan 03 18:01:01 2011 EST | Sun Jan 02 19:00:00 2011 EST | Mon Jan 03 00:00:00 2011 EST +(3 rows) + +--can force local bucketing with simple cast. 
+SELECT time, time_bucket(INTERVAL '1 day', time::timestamp), date_trunc('day', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01', + TIMESTAMP WITH TIME ZONE '2011-01-03 01:01:01+01', + TIMESTAMP WITH TIME ZONE '2011-01-04 01:01:01+02' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+--------------------------+------------------------------ + Sun Jan 02 01:01:01 2011 EST | Sun Jan 02 00:00:00 2011 | Sun Jan 02 00:00:00 2011 EST + Sun Jan 02 19:01:01 2011 EST | Sun Jan 02 00:00:00 2011 | Sun Jan 02 00:00:00 2011 EST + Mon Jan 03 18:01:01 2011 EST | Mon Jan 03 00:00:00 2011 | Mon Jan 03 00:00:00 2011 EST +(3 rows) + +--can also use interval to correct +SELECT time, time_bucket(INTERVAL '1 day', time, -INTERVAL '19 hours'), date_trunc('day', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01', + TIMESTAMP WITH TIME ZONE '2011-01-03 01:01:01+01', + TIMESTAMP WITH TIME ZONE '2011-01-04 01:01:01+02' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+------------------------------+------------------------------ + Sun Jan 02 01:01:01 2011 EST | Sun Jan 02 00:00:00 2011 EST | Sun Jan 02 00:00:00 2011 EST + Sun Jan 02 19:01:01 2011 EST | Sun Jan 02 00:00:00 2011 EST | Sun Jan 02 00:00:00 2011 EST + Mon Jan 03 18:01:01 2011 EST | Mon Jan 03 00:00:00 2011 EST | Mon Jan 03 00:00:00 2011 EST +(3 rows) + +--dst: same local hour bucketed as two different hours. +SELECT time, time_bucket(INTERVAL '1 hour', time), date_trunc('hour', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2017-11-05 12:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 13:05:00+07' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+------------------------------+------------------------------ + Sun Nov 05 01:05:00 2017 EDT | Sun Nov 05 01:00:00 2017 EDT | Sun Nov 05 01:00:00 2017 EDT + Sun Nov 05 01:05:00 2017 EST | Sun Nov 05 01:00:00 2017 EST | Sun Nov 05 01:00:00 2017 EST +(2 rows) + +--local alignment changes when bucketing by UTC across dst boundary +SELECT time, time_bucket(INTERVAL '2 hour', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2017-11-05 10:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 12:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 13:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 15:05:00+07' + ]) AS time; + time | time_bucket +------------------------------+------------------------------ + Sat Nov 04 23:05:00 2017 EDT | Sat Nov 04 22:00:00 2017 EDT + Sun Nov 05 01:05:00 2017 EDT | Sun Nov 05 00:00:00 2017 EDT + Sun Nov 05 01:05:00 2017 EST | Sun Nov 05 01:00:00 2017 EST + Sun Nov 05 03:05:00 2017 EST | Sun Nov 05 03:00:00 2017 EST +(4 rows) + +--local alignment is preserved when bucketing by local time across DST boundary. 
+SELECT time, time_bucket(INTERVAL '2 hour', time::timestamp) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2017-11-05 10:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 12:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 13:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 15:05:00+07' + ]) AS time; + time | time_bucket +------------------------------+-------------------------- + Sat Nov 04 23:05:00 2017 EDT | Sat Nov 04 22:00:00 2017 + Sun Nov 05 01:05:00 2017 EDT | Sun Nov 05 00:00:00 2017 + Sun Nov 05 01:05:00 2017 EST | Sun Nov 05 00:00:00 2017 + Sun Nov 05 03:05:00 2017 EST | Sun Nov 05 02:00:00 2017 +(4 rows) + +SELECT time, + time_bucket(10::smallint, time) AS time_bucket_smallint, + time_bucket(10::int, time) AS time_bucket_int, + time_bucket(10::bigint, time) AS time_bucket_bigint +FROM unnest(ARRAY[ + '-11', + '-10', + '-9', + '-1', + '0', + '1', + '99', + '100', + '109', + '110' + ]::smallint[]) AS time; + time | time_bucket_smallint | time_bucket_int | time_bucket_bigint +------+----------------------+-----------------+-------------------- + -11 | -20 | -20 | -20 + -10 | -10 | -10 | -10 + -9 | -10 | -10 | -10 + -1 | -10 | -10 | -10 + 0 | 0 | 0 | 0 + 1 | 0 | 0 | 0 + 99 | 90 | 90 | 90 + 100 | 100 | 100 | 100 + 109 | 100 | 100 | 100 + 110 | 110 | 110 | 110 +(10 rows) + +SELECT time, + time_bucket(10::smallint, time, 2::smallint) AS time_bucket_smallint, + time_bucket(10::int, time, 2::int) AS time_bucket_int, + time_bucket(10::bigint, time, 2::bigint) AS time_bucket_bigint +FROM unnest(ARRAY[ + '-9', + '-8', + '-7', + '1', + '2', + '3', + '101', + '102', + '111', + '112' + ]::smallint[]) AS time; + time | time_bucket_smallint | time_bucket_int | time_bucket_bigint +------+----------------------+-----------------+-------------------- + -9 | -18 | -18 | -18 + -8 | -8 | -8 | -8 + -7 | -8 | -8 | -8 + 1 | -8 | -8 | -8 + 2 | 2 | 2 | 2 + 3 | 2 | 2 | 2 + 101 | 92 | 92 | 92 + 102 | 102 | 102 | 102 + 111 | 102 | 102 | 102 + 112 | 112 | 112 | 112 +(10 rows) + +SELECT time, + time_bucket(10::smallint, time, -2::smallint) AS time_bucket_smallint, + time_bucket(10::int, time, -2::int) AS time_bucket_int, + time_bucket(10::bigint, time, -2::bigint) AS time_bucket_bigint +FROM unnest(ARRAY[ + '-13', + '-12', + '-11', + '-3', + '-2', + '-1', + '97', + '98', + '107', + '108' + ]::smallint[]) AS time; + time | time_bucket_smallint | time_bucket_int | time_bucket_bigint +------+----------------------+-----------------+-------------------- + -13 | -22 | -22 | -22 + -12 | -12 | -12 | -12 + -11 | -12 | -12 | -12 + -3 | -12 | -12 | -12 + -2 | -2 | -2 | -2 + -1 | -2 | -2 | -2 + 97 | 88 | 88 | 88 + 98 | 98 | 98 | 98 + 107 | 98 | 98 | 98 + 108 | 108 | 108 | 108 +(10 rows) + +\set ON_ERROR_STOP 0 +SELECT time_bucket(10::smallint, '-32768'::smallint); +ERROR: timestamp out of range +SELECT time_bucket(10::smallint, '-32761'::smallint); +ERROR: timestamp out of range +select time_bucket(10::smallint, '-32768'::smallint, 1000::smallint); +ERROR: timestamp out of range +select time_bucket(10::smallint, '-32768'::smallint, '32767'::smallint); +ERROR: timestamp out of range +select time_bucket(10::smallint, '32767'::smallint, '-32768'::smallint); +ERROR: timestamp out of range +\set ON_ERROR_STOP 1 +SELECT time, time_bucket(10::smallint, time) +FROM unnest(ARRAY[ + '-32760', + '-32759', + '32767' + ]::smallint[]) AS time; + time | time_bucket +--------+------------- + -32760 | -32760 + -32759 | -32760 + 32767 | 32760 +(3 rows) + +\set ON_ERROR_STOP 0 +SELECT time_bucket(10::int, '-2147483648'::int); +ERROR: 
timestamp out of range +SELECT time_bucket(10::int, '-2147483641'::int); +ERROR: timestamp out of range +SELECT time_bucket(1000::int, '-2147483000'::int, 1::int); +ERROR: timestamp out of range +SELECT time_bucket(1000::int, '-2147483648'::int, '2147483647'::int); +ERROR: timestamp out of range +SELECT time_bucket(1000::int, '2147483647'::int, '-2147483648'::int); +ERROR: timestamp out of range +\set ON_ERROR_STOP 1 +SELECT time, time_bucket(10::int, time) +FROM unnest(ARRAY[ + '-2147483640', + '-2147483639', + '2147483647' + ]::int[]) AS time; + time | time_bucket +-------------+------------- + -2147483640 | -2147483640 + -2147483639 | -2147483640 + 2147483647 | 2147483640 +(3 rows) + +\set ON_ERROR_STOP 0 +SELECT time_bucket(10::bigint, '-9223372036854775808'::bigint); +ERROR: timestamp out of range +SELECT time_bucket(10::bigint, '-9223372036854775801'::bigint); +ERROR: timestamp out of range +SELECT time_bucket(1000::bigint, '-9223372036854775000'::bigint, 1::bigint); +ERROR: timestamp out of range +SELECT time_bucket(1000::bigint, '-9223372036854775808'::bigint, '9223372036854775807'::bigint); +ERROR: timestamp out of range +SELECT time_bucket(1000::bigint, '9223372036854775807'::bigint, '-9223372036854775808'::bigint); +ERROR: timestamp out of range +\set ON_ERROR_STOP 1 +SELECT time, time_bucket(10::bigint, time) +FROM unnest(ARRAY[ + '-9223372036854775800', + '-9223372036854775799', + '9223372036854775807' + ]::bigint[]) AS time; + time | time_bucket +----------------------+---------------------- + -9223372036854775800 | -9223372036854775800 + -9223372036854775799 | -9223372036854775800 + 9223372036854775807 | 9223372036854775800 +(3 rows) + +SELECT time, time_bucket(INTERVAL '1 day', time::date) +FROM unnest(ARRAY[ + date '2017-11-05', + date '2017-11-06' + ]) AS time; + time | time_bucket +------------+------------- + 11-05-2017 | 11-05-2017 + 11-06-2017 | 11-06-2017 +(2 rows) + +SELECT time, time_bucket(INTERVAL '4 day', time::date) +FROM unnest(ARRAY[ + date '2017-11-04', + date '2017-11-05', + date '2017-11-08', + date '2017-11-09' + ]) AS time; + time | time_bucket +------------+------------- + 11-04-2017 | 11-01-2017 + 11-05-2017 | 11-05-2017 + 11-08-2017 | 11-05-2017 + 11-09-2017 | 11-09-2017 +(4 rows) + +SELECT time, time_bucket(INTERVAL '4 day', time::date, INTERVAL '2 day') +FROM unnest(ARRAY[ + date '2017-11-06', + date '2017-11-07', + date '2017-11-10', + date '2017-11-11' + ]) AS time; + time | time_bucket +------------+------------- + 11-06-2017 | 11-03-2017 + 11-07-2017 | 11-07-2017 + 11-10-2017 | 11-07-2017 + 11-11-2017 | 11-11-2017 +(4 rows) + +-- 2019-09-24 is a Monday, and we want to ensure that time_bucket returns the week starting with a Monday as date_trunc does, +-- Rather than a Saturday which is the date of the PostgreSQL epoch +SELECT time, time_bucket(INTERVAL '1 week', time::date) +FROM unnest(ARRAY[ + date '2018-09-16', + date '2018-09-17', + date '2018-09-23', + date '2018-09-24' + ]) AS time; + time | time_bucket +------------+------------- + 09-16-2018 | 09-10-2018 + 09-17-2018 | 09-17-2018 + 09-23-2018 | 09-17-2018 + 09-24-2018 | 09-24-2018 +(4 rows) + +SELECT time, time_bucket(INTERVAL '1 week', time) +FROM unnest(ARRAY[ + timestamp without time zone '2018-09-16', + timestamp without time zone '2018-09-17', + timestamp without time zone '2018-09-23', + timestamp without time zone '2018-09-24' + ]) AS time; + time | time_bucket +--------------------------+-------------------------- + Sun Sep 16 00:00:00 2018 | Mon Sep 10 00:00:00 2018 + Mon Sep 17 
00:00:00 2018 | Mon Sep 17 00:00:00 2018 + Sun Sep 23 00:00:00 2018 | Mon Sep 17 00:00:00 2018 + Mon Sep 24 00:00:00 2018 | Mon Sep 24 00:00:00 2018 +(4 rows) + +SELECT time, time_bucket(INTERVAL '1 week', time) +FROM unnest(ARRAY[ + timestamp with time zone '2018-09-16', + timestamp with time zone '2018-09-17', + timestamp with time zone '2018-09-23', + timestamp with time zone '2018-09-24' + ]) AS time; + time | time_bucket +------------------------------+------------------------------ + Sun Sep 16 00:00:00 2018 EDT | Sun Sep 09 20:00:00 2018 EDT + Mon Sep 17 00:00:00 2018 EDT | Sun Sep 16 20:00:00 2018 EDT + Sun Sep 23 00:00:00 2018 EDT | Sun Sep 16 20:00:00 2018 EDT + Mon Sep 24 00:00:00 2018 EDT | Sun Sep 23 20:00:00 2018 EDT +(4 rows) + +SELECT time, time_bucket(INTERVAL '1 week', time) +FROM unnest(ARRAY[ + timestamp with time zone '-Infinity', + timestamp with time zone 'Infinity' + ]) AS time; + time | time_bucket +-----------+------------- + -infinity | -infinity + infinity | infinity +(2 rows) + +SELECT time, time_bucket(INTERVAL '1 week', time) +FROM unnest(ARRAY[ + timestamp without time zone '-Infinity', + timestamp without time zone 'Infinity' + ]) AS time; + time | time_bucket +-----------+------------- + -infinity | -infinity + infinity | infinity +(2 rows) + +SELECT time, time_bucket(INTERVAL '1 week', time), date_trunc('week', time) = time_bucket(INTERVAL '1 week', time) +FROM unnest(ARRAY[ + timestamp without time zone '4714-11-24 01:01:01.0 BC', + timestamp without time zone '294276-12-31 23:59:59.9999' + ]) AS time; + time | time_bucket | ?column? +---------------------------------+-----------------------------+---------- + Mon Nov 24 01:01:01 4714 BC | Mon Nov 24 00:00:00 4714 BC | t + Sun Dec 31 23:59:59.9999 294276 | Mon Dec 25 00:00:00 294276 | t +(2 rows) + +--1000 years later weeks still align. +SELECT time, time_bucket(INTERVAL '1 week', time), date_trunc('week', time) = time_bucket(INTERVAL '1 week', time) +FROM unnest(ARRAY[ + timestamp without time zone '3018-09-14', + timestamp without time zone '3018-09-20', + timestamp without time zone '3018-09-21', + timestamp without time zone '3018-09-22' + ]) AS time; + time | time_bucket | ?column? +--------------------------+--------------------------+---------- + Mon Sep 14 00:00:00 3018 | Mon Sep 14 00:00:00 3018 | t + Sun Sep 20 00:00:00 3018 | Mon Sep 14 00:00:00 3018 | t + Mon Sep 21 00:00:00 3018 | Mon Sep 21 00:00:00 3018 | t + Tue Sep 22 00:00:00 3018 | Mon Sep 21 00:00:00 3018 | t +(4 rows) + +--weeks align for timestamptz as well if cast to local time, (but not if done at UTC). +SELECT time, date_trunc('week', time) = time_bucket(INTERVAL '1 week', time), date_trunc('week', time) = time_bucket(INTERVAL '1 week', time::timestamp) +FROM unnest(ARRAY[ + timestamp with time zone '3018-09-14', + timestamp with time zone '3018-09-20', + timestamp with time zone '3018-09-21', + timestamp with time zone '3018-09-22' + ]) AS time; + time | ?column? | ?column? 
+------------------------------+----------+---------- + Mon Sep 14 00:00:00 3018 EDT | f | t + Sun Sep 20 00:00:00 3018 EDT | f | t + Mon Sep 21 00:00:00 3018 EDT | f | t + Tue Sep 22 00:00:00 3018 EDT | f | t +(4 rows) + +--check functions with origin +--note that the default origin is at 0 UTC, using origin parameter it is easy to provide a EDT origin point +\x +SELECT time, time_bucket(INTERVAL '1 week', time) no_epoch, + time_bucket(INTERVAL '1 week', time::timestamp) no_epoch_local, + time_bucket(INTERVAL '1 week', time) = time_bucket(INTERVAL '1 week', time, timestamptz '2000-01-03 00:00:00+0') always_true, + time_bucket(INTERVAL '1 week', time, timestamptz '2000-01-01 00:00:00+0') pg_epoch, + time_bucket(INTERVAL '1 week', time, timestamptz 'epoch') unix_epoch, + time_bucket(INTERVAL '1 week', time, timestamptz '3018-09-13') custom_1, + time_bucket(INTERVAL '1 week', time, timestamptz '3018-09-14') custom_2 +FROM unnest(ARRAY[ + timestamp with time zone '2000-01-01 00:00:00+0'- interval '1 second', + timestamp with time zone '2000-01-01 00:00:00+0', + timestamp with time zone '2000-01-03 00:00:00+0'- interval '1 second', + timestamp with time zone '2000-01-03 00:00:00+0', + timestamp with time zone '2000-01-01', + timestamp with time zone '2000-01-02', + timestamp with time zone '2000-01-03', + timestamp with time zone '3018-09-12', + timestamp with time zone '3018-09-13', + timestamp with time zone '3018-09-14', + timestamp with time zone '3018-09-15' + ]) AS time; +-[ RECORD 1 ]--+----------------------------- +time | Fri Dec 31 18:59:59 1999 EST +no_epoch | Sun Dec 26 19:00:00 1999 EST +no_epoch_local | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Fri Dec 24 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Dec 25 23:00:00 1999 EST +custom_2 | Sun Dec 26 23:00:00 1999 EST +-[ RECORD 2 ]--+----------------------------- +time | Fri Dec 31 19:00:00 1999 EST +no_epoch | Sun Dec 26 19:00:00 1999 EST +no_epoch_local | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Fri Dec 31 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Dec 25 23:00:00 1999 EST +custom_2 | Sun Dec 26 23:00:00 1999 EST +-[ RECORD 3 ]--+----------------------------- +time | Sun Jan 02 18:59:59 2000 EST +no_epoch | Sun Dec 26 19:00:00 1999 EST +no_epoch_local | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Fri Dec 31 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Jan 01 23:00:00 2000 EST +custom_2 | Sun Dec 26 23:00:00 1999 EST +-[ RECORD 4 ]--+----------------------------- +time | Sun Jan 02 19:00:00 2000 EST +no_epoch | Sun Jan 02 19:00:00 2000 EST +no_epoch_local | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Fri Dec 31 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Jan 01 23:00:00 2000 EST +custom_2 | Sun Dec 26 23:00:00 1999 EST +-[ RECORD 5 ]--+----------------------------- +time | Sat Jan 01 00:00:00 2000 EST +no_epoch | Sun Dec 26 19:00:00 1999 EST +no_epoch_local | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Fri Dec 31 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Dec 25 23:00:00 1999 EST +custom_2 | Sun Dec 26 23:00:00 1999 EST +-[ RECORD 6 ]--+----------------------------- +time | Sun Jan 02 00:00:00 2000 EST +no_epoch | Sun Dec 26 19:00:00 1999 EST +no_epoch_local | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Fri Dec 31 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Jan 01 23:00:00 
2000 EST +custom_2 | Sun Dec 26 23:00:00 1999 EST +-[ RECORD 7 ]--+----------------------------- +time | Mon Jan 03 00:00:00 2000 EST +no_epoch | Sun Jan 02 19:00:00 2000 EST +no_epoch_local | Mon Jan 03 00:00:00 2000 +always_true | t +pg_epoch | Fri Dec 31 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Jan 01 23:00:00 2000 EST +custom_2 | Sun Jan 02 23:00:00 2000 EST +-[ RECORD 8 ]--+----------------------------- +time | Sat Sep 12 00:00:00 3018 EDT +no_epoch | Sun Sep 06 20:00:00 3018 EDT +no_epoch_local | Mon Sep 07 00:00:00 3018 +always_true | t +pg_epoch | Fri Sep 11 20:00:00 3018 EDT +unix_epoch | Wed Sep 09 20:00:00 3018 EDT +custom_1 | Sun Sep 06 00:00:00 3018 EDT +custom_2 | Mon Sep 07 00:00:00 3018 EDT +-[ RECORD 9 ]--+----------------------------- +time | Sun Sep 13 00:00:00 3018 EDT +no_epoch | Sun Sep 06 20:00:00 3018 EDT +no_epoch_local | Mon Sep 07 00:00:00 3018 +always_true | t +pg_epoch | Fri Sep 11 20:00:00 3018 EDT +unix_epoch | Wed Sep 09 20:00:00 3018 EDT +custom_1 | Sun Sep 13 00:00:00 3018 EDT +custom_2 | Mon Sep 07 00:00:00 3018 EDT +-[ RECORD 10 ]-+----------------------------- +time | Mon Sep 14 00:00:00 3018 EDT +no_epoch | Sun Sep 13 20:00:00 3018 EDT +no_epoch_local | Mon Sep 14 00:00:00 3018 +always_true | t +pg_epoch | Fri Sep 11 20:00:00 3018 EDT +unix_epoch | Wed Sep 09 20:00:00 3018 EDT +custom_1 | Sun Sep 13 00:00:00 3018 EDT +custom_2 | Mon Sep 14 00:00:00 3018 EDT +-[ RECORD 11 ]-+----------------------------- +time | Tue Sep 15 00:00:00 3018 EDT +no_epoch | Sun Sep 13 20:00:00 3018 EDT +no_epoch_local | Mon Sep 14 00:00:00 3018 +always_true | t +pg_epoch | Fri Sep 11 20:00:00 3018 EDT +unix_epoch | Wed Sep 09 20:00:00 3018 EDT +custom_1 | Sun Sep 13 00:00:00 3018 EDT +custom_2 | Mon Sep 14 00:00:00 3018 EDT + +SELECT time, time_bucket(INTERVAL '1 week', time) no_epoch, + time_bucket(INTERVAL '1 week', time) = time_bucket(INTERVAL '1 week', time, timestamp '2000-01-03 00:00:00') always_true, + time_bucket(INTERVAL '1 week', time, timestamp '2000-01-01 00:00:00+0') pg_epoch, + time_bucket(INTERVAL '1 week', time, timestamp 'epoch') unix_epoch, + time_bucket(INTERVAL '1 week', time, timestamp '3018-09-13') custom_1, + time_bucket(INTERVAL '1 week', time, timestamp '3018-09-14') custom_2 +FROM unnest(ARRAY[ + timestamp without time zone '2000-01-01 00:00:00'- interval '1 second', + timestamp without time zone '2000-01-01 00:00:00', + timestamp without time zone '2000-01-03 00:00:00'- interval '1 second', + timestamp without time zone '2000-01-03 00:00:00', + timestamp without time zone '2000-01-01', + timestamp without time zone '2000-01-02', + timestamp without time zone '2000-01-03', + timestamp without time zone '3018-09-12', + timestamp without time zone '3018-09-13', + timestamp without time zone '3018-09-14', + timestamp without time zone '3018-09-15' + ]) AS time; +-[ RECORD 1 ]------------------------- +time | Fri Dec 31 23:59:59 1999 +no_epoch | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Sat Dec 25 00:00:00 1999 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Dec 26 00:00:00 1999 +custom_2 | Mon Dec 27 00:00:00 1999 +-[ RECORD 2 ]------------------------- +time | Sat Jan 01 00:00:00 2000 +no_epoch | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Sat Jan 01 00:00:00 2000 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Dec 26 00:00:00 1999 +custom_2 | Mon Dec 27 00:00:00 1999 +-[ RECORD 3 ]------------------------- +time | Sun Jan 02 23:59:59 2000 +no_epoch | Mon Dec 27 00:00:00 1999 
+always_true | t +pg_epoch | Sat Jan 01 00:00:00 2000 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Jan 02 00:00:00 2000 +custom_2 | Mon Dec 27 00:00:00 1999 +-[ RECORD 4 ]------------------------- +time | Mon Jan 03 00:00:00 2000 +no_epoch | Mon Jan 03 00:00:00 2000 +always_true | t +pg_epoch | Sat Jan 01 00:00:00 2000 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Jan 02 00:00:00 2000 +custom_2 | Mon Jan 03 00:00:00 2000 +-[ RECORD 5 ]------------------------- +time | Sat Jan 01 00:00:00 2000 +no_epoch | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Sat Jan 01 00:00:00 2000 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Dec 26 00:00:00 1999 +custom_2 | Mon Dec 27 00:00:00 1999 +-[ RECORD 6 ]------------------------- +time | Sun Jan 02 00:00:00 2000 +no_epoch | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Sat Jan 01 00:00:00 2000 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Jan 02 00:00:00 2000 +custom_2 | Mon Dec 27 00:00:00 1999 +-[ RECORD 7 ]------------------------- +time | Mon Jan 03 00:00:00 2000 +no_epoch | Mon Jan 03 00:00:00 2000 +always_true | t +pg_epoch | Sat Jan 01 00:00:00 2000 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Jan 02 00:00:00 2000 +custom_2 | Mon Jan 03 00:00:00 2000 +-[ RECORD 8 ]------------------------- +time | Sat Sep 12 00:00:00 3018 +no_epoch | Mon Sep 07 00:00:00 3018 +always_true | t +pg_epoch | Sat Sep 12 00:00:00 3018 +unix_epoch | Thu Sep 10 00:00:00 3018 +custom_1 | Sun Sep 06 00:00:00 3018 +custom_2 | Mon Sep 07 00:00:00 3018 +-[ RECORD 9 ]------------------------- +time | Sun Sep 13 00:00:00 3018 +no_epoch | Mon Sep 07 00:00:00 3018 +always_true | t +pg_epoch | Sat Sep 12 00:00:00 3018 +unix_epoch | Thu Sep 10 00:00:00 3018 +custom_1 | Sun Sep 13 00:00:00 3018 +custom_2 | Mon Sep 07 00:00:00 3018 +-[ RECORD 10 ]------------------------ +time | Mon Sep 14 00:00:00 3018 +no_epoch | Mon Sep 14 00:00:00 3018 +always_true | t +pg_epoch | Sat Sep 12 00:00:00 3018 +unix_epoch | Thu Sep 10 00:00:00 3018 +custom_1 | Sun Sep 13 00:00:00 3018 +custom_2 | Mon Sep 14 00:00:00 3018 +-[ RECORD 11 ]------------------------ +time | Tue Sep 15 00:00:00 3018 +no_epoch | Mon Sep 14 00:00:00 3018 +always_true | t +pg_epoch | Sat Sep 12 00:00:00 3018 +unix_epoch | Thu Sep 10 00:00:00 3018 +custom_1 | Sun Sep 13 00:00:00 3018 +custom_2 | Mon Sep 14 00:00:00 3018 + +SELECT time, time_bucket(INTERVAL '1 week', time) no_epoch, + time_bucket(INTERVAL '1 week', time) = time_bucket(INTERVAL '1 week', time, date '2000-01-03') always_true, + time_bucket(INTERVAL '1 week', time, date '2000-01-01') pg_epoch, + time_bucket(INTERVAL '1 week', time, (timestamp 'epoch')::date) unix_epoch, + time_bucket(INTERVAL '1 week', time, date '3018-09-13') custom_1, + time_bucket(INTERVAL '1 week', time, date '3018-09-14') custom_2 +FROM unnest(ARRAY[ + date '1999-12-31', + date '2000-01-01', + date '2000-01-02', + date '2000-01-03', + date '3018-09-12', + date '3018-09-13', + date '3018-09-14', + date '3018-09-15' + ]) AS time; +-[ RECORD 1 ]----------- +time | 12-31-1999 +no_epoch | 12-27-1999 +always_true | t +pg_epoch | 12-25-1999 +unix_epoch | 12-30-1999 +custom_1 | 12-26-1999 +custom_2 | 12-27-1999 +-[ RECORD 2 ]----------- +time | 01-01-2000 +no_epoch | 12-27-1999 +always_true | t +pg_epoch | 01-01-2000 +unix_epoch | 12-30-1999 +custom_1 | 12-26-1999 +custom_2 | 12-27-1999 +-[ RECORD 3 ]----------- +time | 01-02-2000 +no_epoch | 12-27-1999 +always_true | t +pg_epoch | 01-01-2000 +unix_epoch | 12-30-1999 +custom_1 | 01-02-2000 
+custom_2 | 12-27-1999 +-[ RECORD 4 ]----------- +time | 01-03-2000 +no_epoch | 01-03-2000 +always_true | t +pg_epoch | 01-01-2000 +unix_epoch | 12-30-1999 +custom_1 | 01-02-2000 +custom_2 | 01-03-2000 +-[ RECORD 5 ]----------- +time | 09-12-3018 +no_epoch | 09-07-3018 +always_true | t +pg_epoch | 09-12-3018 +unix_epoch | 09-10-3018 +custom_1 | 09-06-3018 +custom_2 | 09-07-3018 +-[ RECORD 6 ]----------- +time | 09-13-3018 +no_epoch | 09-07-3018 +always_true | t +pg_epoch | 09-12-3018 +unix_epoch | 09-10-3018 +custom_1 | 09-13-3018 +custom_2 | 09-07-3018 +-[ RECORD 7 ]----------- +time | 09-14-3018 +no_epoch | 09-14-3018 +always_true | t +pg_epoch | 09-12-3018 +unix_epoch | 09-10-3018 +custom_1 | 09-13-3018 +custom_2 | 09-14-3018 +-[ RECORD 8 ]----------- +time | 09-15-3018 +no_epoch | 09-14-3018 +always_true | t +pg_epoch | 09-12-3018 +unix_epoch | 09-10-3018 +custom_1 | 09-13-3018 +custom_2 | 09-14-3018 + +\x +--really old origin works if date around that time +SELECT time, time_bucket(INTERVAL '1 week', time, timestamp without time zone '4710-11-24 01:01:01.0 BC') +FROM unnest(ARRAY[ + timestamp without time zone '4710-11-24 01:01:01.0 BC', + timestamp without time zone '4710-11-25 01:01:01.0 BC', + timestamp without time zone '2001-01-01', + timestamp without time zone '3001-01-01' + ]) AS time; + time | time_bucket +-----------------------------+----------------------------- + Sat Nov 24 01:01:01 4710 BC | Sat Nov 24 01:01:01 4710 BC + Sun Nov 25 01:01:01 4710 BC | Sat Nov 24 01:01:01 4710 BC + Mon Jan 01 00:00:00 2001 | Sat Dec 30 01:01:01 2000 + Thu Jan 01 00:00:00 3001 | Sat Dec 27 01:01:01 3000 +(4 rows) + +SELECT time, time_bucket(INTERVAL '1 week', time, timestamp without time zone '294270-12-30 23:59:59.9999') +FROM unnest(ARRAY[ + timestamp without time zone '294270-12-29 23:59:59.9999', + timestamp without time zone '294270-12-30 23:59:59.9999', + timestamp without time zone '294270-12-31 23:59:59.9999', + timestamp without time zone '2001-01-01', + timestamp without time zone '3001-01-01' + ]) AS time; + time | time_bucket +---------------------------------+--------------------------------- + Thu Dec 29 23:59:59.9999 294270 | Fri Dec 23 23:59:59.9999 294270 + Fri Dec 30 23:59:59.9999 294270 | Fri Dec 30 23:59:59.9999 294270 + Sat Dec 31 23:59:59.9999 294270 | Fri Dec 30 23:59:59.9999 294270 + Mon Jan 01 00:00:00 2001 | Fri Dec 29 23:59:59.9999 2000 + Thu Jan 01 00:00:00 3001 | Fri Dec 26 23:59:59.9999 3000 +(5 rows) + +\set ON_ERROR_STOP 0 +--really old origin + very new data + long period errors +SELECT time, time_bucket(INTERVAL '100000 day', time, timestamp without time zone '4710-11-24 01:01:01.0 BC') +FROM unnest(ARRAY[ + timestamp without time zone '294270-12-31 23:59:59.9999' + ]) AS time; +ERROR: timestamp out of range +SELECT time, time_bucket(INTERVAL '100000 day', time, timestamp with time zone '4710-11-25 01:01:01.0 BC') +FROM unnest(ARRAY[ + timestamp with time zone '294270-12-30 23:59:59.9999' + ]) AS time; +ERROR: timestamp out of range +--really high origin + old data + long period errors out +SELECT time, time_bucket(INTERVAL '10000000 day', time, timestamp without time zone '294270-12-31 23:59:59.9999') +FROM unnest(ARRAY[ + timestamp without time zone '4710-11-24 01:01:01.0 BC' + ]) AS time; +ERROR: timestamp out of range +SELECT time, time_bucket(INTERVAL '10000000 day', time, timestamp with time zone '294270-12-31 23:59:59.9999') +FROM unnest(ARRAY[ + timestamp with time zone '4710-11-24 01:01:01.0 BC' + ]) AS time; +ERROR: timestamp out of range +\set 
ON_ERROR_STOP 1 +------------------------------------------- +--- Test time_bucket with month periods --- +------------------------------------------- +SET datestyle TO ISO; +SELECT + time::date, + time_bucket('1 month', time::date) AS "1m", + time_bucket('2 month', time::date) AS "2m", + time_bucket('3 month', time::date) AS "3m", + time_bucket('1 month', time::date, '2000-02-01'::date) AS "1m origin", + time_bucket('2 month', time::date, '2000-02-01'::date) AS "2m origin", + time_bucket('3 month', time::date, '2000-02-01'::date) AS "3m origin" +FROM generate_series('1990-01-03'::date,'1990-06-03'::date,'1month'::interval) time; + time | 1m | 2m | 3m | 1m origin | 2m origin | 3m origin +------------+------------+------------+------------+------------+------------+------------ + 1990-01-03 | 1990-01-01 | 1990-01-01 | 1990-01-01 | 1990-01-01 | 1989-12-01 | 1989-11-01 + 1990-02-03 | 1990-02-01 | 1990-01-01 | 1990-01-01 | 1990-02-01 | 1990-02-01 | 1990-02-01 + 1990-03-03 | 1990-03-01 | 1990-03-01 | 1990-01-01 | 1990-03-01 | 1990-02-01 | 1990-02-01 + 1990-04-03 | 1990-04-01 | 1990-03-01 | 1990-04-01 | 1990-04-01 | 1990-04-01 | 1990-02-01 + 1990-05-03 | 1990-05-01 | 1990-05-01 | 1990-04-01 | 1990-05-01 | 1990-04-01 | 1990-05-01 + 1990-06-03 | 1990-06-01 | 1990-05-01 | 1990-04-01 | 1990-06-01 | 1990-06-01 | 1990-05-01 +(6 rows) + +SELECT + time, + time_bucket('1 month', time) AS "1m", + time_bucket('2 month', time) AS "2m", + time_bucket('3 month', time) AS "3m", + time_bucket('1 month', time, '2000-02-01'::timestamp) AS "1m origin", + time_bucket('2 month', time, '2000-02-01'::timestamp) AS "2m origin", + time_bucket('3 month', time, '2000-02-01'::timestamp) AS "3m origin" +FROM generate_series('1990-01-03'::timestamp,'1990-06-03'::timestamp,'1month'::interval) time; + time | 1m | 2m | 3m | 1m origin | 2m origin | 3m origin +---------------------+---------------------+---------------------+---------------------+---------------------+---------------------+--------------------- + 1990-01-03 00:00:00 | 1990-01-01 00:00:00 | 1990-01-01 00:00:00 | 1990-01-01 00:00:00 | 1990-01-01 00:00:00 | 1989-12-01 00:00:00 | 1989-11-01 00:00:00 + 1990-02-03 00:00:00 | 1990-02-01 00:00:00 | 1990-01-01 00:00:00 | 1990-01-01 00:00:00 | 1990-02-01 00:00:00 | 1990-02-01 00:00:00 | 1990-02-01 00:00:00 + 1990-03-03 00:00:00 | 1990-03-01 00:00:00 | 1990-03-01 00:00:00 | 1990-01-01 00:00:00 | 1990-03-01 00:00:00 | 1990-02-01 00:00:00 | 1990-02-01 00:00:00 + 1990-04-03 00:00:00 | 1990-04-01 00:00:00 | 1990-03-01 00:00:00 | 1990-04-01 00:00:00 | 1990-04-01 00:00:00 | 1990-04-01 00:00:00 | 1990-02-01 00:00:00 + 1990-05-03 00:00:00 | 1990-05-01 00:00:00 | 1990-05-01 00:00:00 | 1990-04-01 00:00:00 | 1990-05-01 00:00:00 | 1990-04-01 00:00:00 | 1990-05-01 00:00:00 + 1990-06-03 00:00:00 | 1990-06-01 00:00:00 | 1990-05-01 00:00:00 | 1990-04-01 00:00:00 | 1990-06-01 00:00:00 | 1990-06-01 00:00:00 | 1990-05-01 00:00:00 +(6 rows) + +SELECT + time, + time_bucket('1 month', time) AS "1m", + time_bucket('2 month', time) AS "2m", + time_bucket('3 month', time) AS "3m", + time_bucket('1 month', time, '2000-02-01'::timestamptz) AS "1m origin", + time_bucket('2 month', time, '2000-02-01'::timestamptz) AS "2m origin", + time_bucket('3 month', time, '2000-02-01'::timestamptz) AS "3m origin" +FROM generate_series('1990-01-03'::timestamptz,'1990-06-03'::timestamptz,'1month'::interval) time; + time | 1m | 2m | 3m | 1m origin | 2m origin | 3m origin 
+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------ + 1990-01-03 00:00:00-05 | 1989-12-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1989-11-30 19:00:00-05 | 1989-10-31 19:00:00-05 + 1990-02-03 00:00:00-05 | 1990-01-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1990-01-31 19:00:00-05 | 1990-01-31 19:00:00-05 | 1990-01-31 19:00:00-05 + 1990-03-03 00:00:00-05 | 1990-02-28 19:00:00-05 | 1990-02-28 19:00:00-05 | 1989-12-31 19:00:00-05 | 1990-02-28 19:00:00-05 | 1990-01-31 19:00:00-05 | 1990-01-31 19:00:00-05 + 1990-04-03 00:00:00-04 | 1990-03-31 19:00:00-05 | 1990-02-28 19:00:00-05 | 1990-03-31 19:00:00-05 | 1990-03-31 19:00:00-05 | 1990-03-31 19:00:00-05 | 1990-01-31 19:00:00-05 + 1990-05-03 00:00:00-04 | 1990-04-30 20:00:00-04 | 1990-04-30 20:00:00-04 | 1990-03-31 19:00:00-05 | 1990-04-30 20:00:00-04 | 1990-03-31 19:00:00-05 | 1990-04-30 20:00:00-04 + 1990-06-03 00:00:00-04 | 1990-05-31 20:00:00-04 | 1990-04-30 20:00:00-04 | 1990-03-31 19:00:00-05 | 1990-05-31 20:00:00-04 | 1990-05-31 20:00:00-04 | 1990-04-30 20:00:00-04 +(6 rows) + +--------------------------------------- +--- Test time_bucket with timezones --- +--------------------------------------- +-- test NULL args +SELECT +time_bucket(NULL::interval,now(),'Europe/Berlin'), +time_bucket('1day',NULL::timestamptz,'Europe/Berlin'), +time_bucket('1day',now(),NULL::text), +time_bucket('1day','2020-02-03','Europe/Berlin',NULL), +time_bucket('1day','2020-02-03','Europe/Berlin','2020-04-01',NULL), +time_bucket('1day','2020-02-03','Europe/Berlin',NULL,NULL), +time_bucket('1day','2020-02-03','Europe/Berlin',"offset":=NULL::interval), +time_bucket('1day','2020-02-03','Europe/Berlin',origin:=NULL::timestamptz); + time_bucket | time_bucket | time_bucket | time_bucket | time_bucket | time_bucket | time_bucket | time_bucket +-------------+-------------+-------------+------------------------+------------------------+------------------------+------------------------+------------------------ + | | | 2020-02-02 18:00:00-05 | 2020-02-03 00:00:00-05 | 2020-02-02 18:00:00-05 | 2020-02-02 18:00:00-05 | 2020-02-02 18:00:00-05 +(1 row) + +SET datestyle TO ISO; +SELECT + time_bucket('1day', ts) AS "UTC", + time_bucket('1day', ts, 'Europe/Berlin') AS "Berlin", + time_bucket('1day', ts, 'Europe/London') AS "London", + time_bucket('1day', ts, 'America/New_York') AS "New York", + time_bucket('1day', ts, 'PST') AS "PST", + time_bucket('1day', ts, current_setting('timezone')) AS "current" +FROM generate_series('1999-12-31 17:00'::timestamptz,'2000-01-02 3:00'::timestamptz, '1hour'::interval) ts; + UTC | Berlin | London | New York | PST | current +------------------------+------------------------+------------------------+------------------------+------------------------+------------------------ + 1999-12-30 19:00:00-05 | 1999-12-30 18:00:00-05 | 1999-12-30 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05 + 1999-12-30 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-30 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 
00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-31 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-31 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-31 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 2000-01-01 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 
00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-02 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-02 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-02 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-02 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-02 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-02 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-02 00:00:00-05 | 2000-01-02 03:00:00-05 | 2000-01-02 00:00:00-05 +(35 rows) + +SELECT + time_bucket('1month', ts) AS "UTC", + time_bucket('1month', ts, 'Europe/Berlin') AS "Berlin", + time_bucket('1month', ts, 'America/New_York') AS "New York", + time_bucket('1month', ts, current_setting('timezone')) AS "current", + time_bucket('2month', ts, current_setting('timezone')) AS "2m", + time_bucket('2month', ts, current_setting('timezone'), '2000-02-01'::timestamp) AS "2m origin", + time_bucket('2month', ts, current_setting('timezone'), "offset":='14 day'::interval) AS "2m offset", + time_bucket('2month', ts, current_setting('timezone'), '2000-02-01'::timestamp, '7 day'::interval) AS "2m offset + origin" +FROM generate_series('1999-12-01'::timestamptz,'2000-09-01'::timestamptz, '9 day'::interval) ts; + UTC | Berlin | New York | current | 2m | 2m origin | 2m offset | 2m offset + origin +------------------------+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------ + 1999-11-30 19:00:00-05 | 1999-11-30 18:00:00-05 | 1999-12-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-10-08 00:00:00-04 + 1999-11-30 19:00:00-05 | 1999-11-30 18:00:00-05 | 1999-12-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 1999-11-30 19:00:00-05 | 1999-11-30 18:00:00-05 | 1999-12-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 1999-11-30 19:00:00-05 | 1999-11-30 18:00:00-05 | 1999-12-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 2000-01-31 
19:00:00-05 | 2000-01-31 18:00:00-05 | 2000-02-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 2000-01-31 19:00:00-05 | 2000-01-31 18:00:00-05 | 2000-02-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-01-31 19:00:00-05 | 2000-01-31 18:00:00-05 | 2000-02-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-01-31 19:00:00-05 | 2000-01-31 18:00:00-05 | 2000-02-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-02-29 19:00:00-05 | 2000-02-29 18:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-02-29 19:00:00-05 | 2000-02-29 18:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-02-29 19:00:00-05 | 2000-02-29 18:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-03-31 19:00:00-05 | 2000-03-31 17:00:00-05 | 2000-04-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-03-31 19:00:00-05 | 2000-03-31 17:00:00-05 | 2000-04-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-04-08 00:00:00-04 + 2000-03-31 19:00:00-05 | 2000-03-31 17:00:00-05 | 2000-04-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-04-08 00:00:00-04 + 2000-04-30 20:00:00-04 | 2000-04-30 18:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-04-08 00:00:00-04 + 2000-04-30 20:00:00-04 | 2000-04-30 18:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-04-08 00:00:00-04 + 2000-04-30 20:00:00-04 | 2000-04-30 18:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-04-01 00:00:00-05 | 2000-05-15 00:00:00-04 | 2000-04-08 00:00:00-04 + 2000-04-30 20:00:00-04 | 2000-04-30 18:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-04-01 00:00:00-05 | 2000-05-15 00:00:00-04 | 2000-04-08 00:00:00-04 + 2000-05-31 20:00:00-04 | 2000-05-31 18:00:00-04 | 2000-06-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-04-08 00:00:00-04 + 2000-05-31 20:00:00-04 | 2000-05-31 18:00:00-04 | 2000-06-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-06-08 00:00:00-04 + 2000-05-31 20:00:00-04 | 2000-05-31 18:00:00-04 | 2000-06-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-06-08 00:00:00-04 + 2000-06-30 20:00:00-04 | 2000-06-30 18:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 
00:00:00-04 | 2000-06-08 00:00:00-04 + 2000-06-30 20:00:00-04 | 2000-06-30 18:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-06-08 00:00:00-04 + 2000-06-30 20:00:00-04 | 2000-06-30 18:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-06-08 00:00:00-04 + 2000-06-30 20:00:00-04 | 2000-06-30 18:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-06-08 00:00:00-04 + 2000-07-31 20:00:00-04 | 2000-07-31 18:00:00-04 | 2000-08-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-08-08 00:00:00-04 + 2000-07-31 20:00:00-04 | 2000-07-31 18:00:00-04 | 2000-08-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-08-08 00:00:00-04 + 2000-07-31 20:00:00-04 | 2000-07-31 18:00:00-04 | 2000-08-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-08-08 00:00:00-04 +(31 rows) + +RESET datestyle; +------------------------------------------------------------ +--- Test timescaledb_experimental.time_bucket_ng function -- +------------------------------------------------------------ +-- not supported functionality +\set ON_ERROR_STOP 0 +SELECT timescaledb_experimental.time_bucket_ng('1 hour', '2001-02-03' :: date) AS result; +ERROR: interval must be either days and weeks, or months and years +SELECT timescaledb_experimental.time_bucket_ng('0 days', '2001-02-03' :: date) AS result; +ERROR: interval must be at least one day +SELECT timescaledb_experimental.time_bucket_ng('1 month', '2001-02-03' :: date, origin => '2000-01-02') AS result; +ERROR: origin must be the first day of the month +HINT: When using timestamptz-version of the function, 'origin' is converted to provided 'timezone'. +SELECT timescaledb_experimental.time_bucket_ng('1 month', '2000-01-02' :: date, origin => '2001-01-01') AS result; + result +------------ + 01-01-2000 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 day', '2000-01-02' :: date, origin => '2001-01-01') AS result; +ERROR: origin must be before the given date +SELECT timescaledb_experimental.time_bucket_ng('1 month 3 hours', '2021-11-22' :: timestamp) AS result; +ERROR: interval can't combine months with minutes or hours +-- timestamp is less than the default 'origin' value +SELECT timescaledb_experimental.time_bucket_ng('1 day', '1999-01-01 12:34:56 MSK' :: timestamptz, timezone => 'MSK'); +ERROR: origin must be before the given date +-- 'origin' in Europe/Moscow timezone is not the first day of the month at given time zone (UTC in this case) +select timescaledb_experimental.time_bucket_ng('1 month', '2021-07-12 12:34:56 Europe/Moscow' :: timestamptz, origin => '2021-06-01 00:00:00 Europe/Moscow' :: timestamptz, timezone => 'UTC'); +ERROR: origin must be the first day of the month +HINT: When using timestamptz-version of the function, 'origin' is converted to provided 'timezone'. 
+\set ON_ERROR_STOP 1 +-- wrappers +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-11-22' :: timestamp) AS result; + result +-------------------------- + Fri Jan 01 00:00:00 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-11-22' :: timestamptz) AS result; + result +------------------------------ + Fri Jan 01 00:00:00 2021 EST +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-11-22' :: timestamp, origin => '2021-06-01') AS result; + result +-------------------------- + Tue Jun 01 00:00:00 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-11-22' :: timestamptz, origin => '2021-06-01') AS result; + result +------------------------------ + Tue Jun 01 00:00:00 2021 EDT +(1 row) + +-- null argument +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: date) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamp) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamptz) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamptz, timezone => 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: date, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamp, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamptz, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamptz, origin => '2021-06-01', timezone => 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +-- null interval +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12' :: date) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamp) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamptz) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamptz, 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12' :: date, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamp, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamptz, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamptz, origin => '2021-06-01', timezone => 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +-- null origin +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12' :: date, origin => null) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamp, origin => null) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, origin => null) AS result; + 
result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, origin => null, timezone => 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +-- infinity argument +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: date) AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamp) AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamptz) AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamptz, timezone => 'Europe/Moscow') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: date, origin => '2021-06-01') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamp, origin => '2021-06-01') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamptz, origin => '2021-06-01') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamptz, origin => '2021-06-01', timezone => 'Europe/Moscow') AS result; + result +---------- + infinity +(1 row) + +-- test for specific code path: hours/minutes/seconds interval and timestamp argument +SELECT timescaledb_experimental.time_bucket_ng('12 hours', 'infinity' :: timestamp) AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('12 hours', 'infinity' :: timestamp, origin => '2021-06-01') AS result; + result +---------- + infinity +(1 row) + +-- infinite origin +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12' :: date, origin => 'infinity') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamp, origin => 'infinity') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, origin => 'infinity') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, origin => 'infinity', timezone => 'Europe/Moscow') AS result; + result +---------- + infinity +(1 row) + +-- test for specific code path: hours/minutes/seconds interval and timestamp argument +SELECT timescaledb_experimental.time_bucket_ng('12 hours', '2021-07-12 12:34:56' :: timestamp, origin => 'infinity') AS result; + result +---------- + infinity +(1 row) + +-- test for invalid timezone argument +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, timezone => null) AS result; + result +-------- + +(1 row) + +\set ON_ERROR_STOP 0 +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, timezone => 'Europe/Ololondon') AS result; +ERROR: time zone "Europe/Ololondon" not recognized +\set ON_ERROR_STOP 1 +-- Make sure time_bucket_ng() supports seconds, minutes, and hours. +-- We happen to know that the internal implementation is the same +-- as for time_bucket(), thus there is no reason to execute all the tests +-- we already have for time_bucket(). 
These two functions will most likely +-- be merged eventually anyway. +SELECT timescaledb_experimental.time_bucket_ng('30 seconds', '2021-07-12 12:34:56' :: timestamp) AS result; + result +-------------------------- + Mon Jul 12 12:34:30 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('15 minutes', '2021-07-12 12:34:56' :: timestamp) AS result; + result +-------------------------- + Mon Jul 12 12:30:00 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('6 hours', '2021-07-12 12:34:56' :: timestamp) AS result; + result +-------------------------- + Mon Jul 12 12:00:00 2021 +(1 row) + +-- Same as above, but with provided 'origin' argument. +SELECT timescaledb_experimental.time_bucket_ng('30 seconds', '2021-07-12 12:34:56' :: timestamp, origin => '2021-07-12 12:10:00') AS result; + result +-------------------------- + Mon Jul 12 12:34:30 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('15 minutes', '2021-07-12 12:34:56' :: timestamp, origin => '2021-07-12 12:10:00') AS result; + result +-------------------------- + Mon Jul 12 12:25:00 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('6 hours', '2021-07-12 12:34:56' :: timestamp, origin => '2021-07-12 12:10:00') AS result; + result +-------------------------- + Mon Jul 12 12:10:00 2021 +(1 row) + +-- N days / weeks buckets +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 day', d), 'YYYY-MM-DD') AS d1, + to_char(timescaledb_experimental.time_bucket_ng('2 days', d), 'YYYY-MM-DD') AS d2, + to_char(timescaledb_experimental.time_bucket_ng('3 days', d), 'YYYY-MM-DD') AS d3, + to_char(timescaledb_experimental.time_bucket_ng('1 week', d), 'YYYY-MM-DD') AS w1, + to_char(timescaledb_experimental.time_bucket_ng('1 week 2 days', d), 'YYYY-MM-DD') AS w1d2 +FROM generate_series('2020-01-01' :: date, '2020-01-12', '1 day') AS ts, + unnest(array[ts :: date]) AS d; + d | d1 | d2 | d3 | w1 | w1d2 +------------+------------+------------+------------+------------+------------ + 2020-01-01 | 2020-01-01 | 2019-12-31 | 2020-01-01 | 2019-12-28 | 2019-12-26 + 2020-01-02 | 2020-01-02 | 2020-01-02 | 2020-01-01 | 2019-12-28 | 2019-12-26 + 2020-01-03 | 2020-01-03 | 2020-01-02 | 2020-01-01 | 2019-12-28 | 2019-12-26 + 2020-01-04 | 2020-01-04 | 2020-01-04 | 2020-01-04 | 2020-01-04 | 2020-01-04 + 2020-01-05 | 2020-01-05 | 2020-01-04 | 2020-01-04 | 2020-01-04 | 2020-01-04 + 2020-01-06 | 2020-01-06 | 2020-01-06 | 2020-01-04 | 2020-01-04 | 2020-01-04 + 2020-01-07 | 2020-01-07 | 2020-01-06 | 2020-01-07 | 2020-01-04 | 2020-01-04 + 2020-01-08 | 2020-01-08 | 2020-01-08 | 2020-01-07 | 2020-01-04 | 2020-01-04 + 2020-01-09 | 2020-01-09 | 2020-01-08 | 2020-01-07 | 2020-01-04 | 2020-01-04 + 2020-01-10 | 2020-01-10 | 2020-01-10 | 2020-01-10 | 2020-01-04 | 2020-01-04 + 2020-01-11 | 2020-01-11 | 2020-01-10 | 2020-01-10 | 2020-01-11 | 2020-01-04 + 2020-01-12 | 2020-01-12 | 2020-01-12 | 2020-01-10 | 2020-01-11 | 2020-01-04 +(12 rows) + +-- N days / weeks buckets with given 'origin' +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 day', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS d1, + to_char(timescaledb_experimental.time_bucket_ng('2 days', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS d2, + to_char(timescaledb_experimental.time_bucket_ng('3 days', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS d3, + to_char(timescaledb_experimental.time_bucket_ng('1 week', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS w1, + 
to_char(timescaledb_experimental.time_bucket_ng('1 week 2 days', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS w1d2 +FROM generate_series('2020-01-01' :: date, '2020-01-12', '1 day') AS ts, + unnest(array[ts :: date]) AS d; + d | d1 | d2 | d3 | w1 | w1d2 +------------+------------+------------+------------+------------+------------ + 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-01-02 | 2020-01-02 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-01-03 | 2020-01-03 | 2020-01-03 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-01-04 | 2020-01-04 | 2020-01-03 | 2020-01-04 | 2020-01-01 | 2020-01-01 + 2020-01-05 | 2020-01-05 | 2020-01-05 | 2020-01-04 | 2020-01-01 | 2020-01-01 + 2020-01-06 | 2020-01-06 | 2020-01-05 | 2020-01-04 | 2020-01-01 | 2020-01-01 + 2020-01-07 | 2020-01-07 | 2020-01-07 | 2020-01-07 | 2020-01-01 | 2020-01-01 + 2020-01-08 | 2020-01-08 | 2020-01-07 | 2020-01-07 | 2020-01-08 | 2020-01-01 + 2020-01-09 | 2020-01-09 | 2020-01-09 | 2020-01-07 | 2020-01-08 | 2020-01-01 + 2020-01-10 | 2020-01-10 | 2020-01-09 | 2020-01-10 | 2020-01-08 | 2020-01-10 + 2020-01-11 | 2020-01-11 | 2020-01-11 | 2020-01-10 | 2020-01-08 | 2020-01-10 + 2020-01-12 | 2020-01-12 | 2020-01-11 | 2020-01-10 | 2020-01-08 | 2020-01-10 +(12 rows) + +-- N month buckets +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 month', d), 'YYYY-MM-DD') AS m1, + to_char(timescaledb_experimental.time_bucket_ng('2 month', d), 'YYYY-MM-DD') AS m2, + to_char(timescaledb_experimental.time_bucket_ng('3 month', d), 'YYYY-MM-DD') AS m3, + to_char(timescaledb_experimental.time_bucket_ng('4 month', d), 'YYYY-MM-DD') AS m4, + to_char(timescaledb_experimental.time_bucket_ng('5 month', d), 'YYYY-MM-DD') AS m5 +FROM generate_series('2020-01-01' :: date, '2020-12-01', '1 month') AS ts, + unnest(array[ts :: date]) AS d; + d | m1 | m2 | m3 | m4 | m5 +------------+------------+------------+------------+------------+------------ + 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-02-01 | 2020-02-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-03-01 | 2020-03-01 | 2020-03-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-04-01 | 2020-04-01 | 2020-03-01 | 2020-04-01 | 2020-01-01 | 2020-01-01 + 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-04-01 | 2020-05-01 | 2020-01-01 + 2020-06-01 | 2020-06-01 | 2020-05-01 | 2020-04-01 | 2020-05-01 | 2020-06-01 + 2020-07-01 | 2020-07-01 | 2020-07-01 | 2020-07-01 | 2020-05-01 | 2020-06-01 + 2020-08-01 | 2020-08-01 | 2020-07-01 | 2020-07-01 | 2020-05-01 | 2020-06-01 + 2020-09-01 | 2020-09-01 | 2020-09-01 | 2020-07-01 | 2020-09-01 | 2020-06-01 + 2020-10-01 | 2020-10-01 | 2020-09-01 | 2020-10-01 | 2020-09-01 | 2020-06-01 + 2020-11-01 | 2020-11-01 | 2020-11-01 | 2020-10-01 | 2020-09-01 | 2020-11-01 + 2020-12-01 | 2020-12-01 | 2020-11-01 | 2020-10-01 | 2020-09-01 | 2020-11-01 +(12 rows) + +-- N month buckets with given 'origin' +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 month', d, origin => '2019-05-01'), 'YYYY-MM-DD') AS m1, + to_char(timescaledb_experimental.time_bucket_ng('2 month', d, origin => '2019-05-01'), 'YYYY-MM-DD') AS m2, + to_char(timescaledb_experimental.time_bucket_ng('3 month', d, origin => '2019-05-01'), 'YYYY-MM-DD') AS m3, + to_char(timescaledb_experimental.time_bucket_ng('4 month', d, origin => '2019-05-01'), 'YYYY-MM-DD') AS m4, + to_char(timescaledb_experimental.time_bucket_ng('5 month', d, origin => '2019-05-01'), 
'YYYY-MM-DD') AS m5 +FROM generate_series('2020-01-01' :: date, '2020-12-01', '1 month') AS ts, + unnest(array[ts :: date]) AS d; + d | m1 | m2 | m3 | m4 | m5 +------------+------------+------------+------------+------------+------------ + 2020-01-01 | 2020-01-01 | 2020-01-01 | 2019-11-01 | 2020-01-01 | 2019-10-01 + 2020-02-01 | 2020-02-01 | 2020-01-01 | 2020-02-01 | 2020-01-01 | 2019-10-01 + 2020-03-01 | 2020-03-01 | 2020-03-01 | 2020-02-01 | 2020-01-01 | 2020-03-01 + 2020-04-01 | 2020-04-01 | 2020-03-01 | 2020-02-01 | 2020-01-01 | 2020-03-01 + 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-03-01 + 2020-06-01 | 2020-06-01 | 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-03-01 + 2020-07-01 | 2020-07-01 | 2020-07-01 | 2020-05-01 | 2020-05-01 | 2020-03-01 + 2020-08-01 | 2020-08-01 | 2020-07-01 | 2020-08-01 | 2020-05-01 | 2020-08-01 + 2020-09-01 | 2020-09-01 | 2020-09-01 | 2020-08-01 | 2020-09-01 | 2020-08-01 + 2020-10-01 | 2020-10-01 | 2020-09-01 | 2020-08-01 | 2020-09-01 | 2020-08-01 + 2020-11-01 | 2020-11-01 | 2020-11-01 | 2020-11-01 | 2020-09-01 | 2020-08-01 + 2020-12-01 | 2020-12-01 | 2020-11-01 | 2020-11-01 | 2020-09-01 | 2020-08-01 +(12 rows) + +-- N years / N years, M month buckets +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 year', d), 'YYYY-MM-DD') AS y1, + to_char(timescaledb_experimental.time_bucket_ng('1 year 6 month', d), 'YYYY-MM-DD') AS y1m6, + to_char(timescaledb_experimental.time_bucket_ng('2 years', d), 'YYYY-MM-DD') AS y2, + to_char(timescaledb_experimental.time_bucket_ng('2 years 6 month', d), 'YYYY-MM-DD') AS y2m6, + to_char(timescaledb_experimental.time_bucket_ng('3 years', d), 'YYYY-MM-DD') AS y3 +FROM generate_series('2015-01-01' :: date, '2020-12-01', '6 month') AS ts, + unnest(array[ts :: date]) AS d; + d | y1 | y1m6 | y2 | y2m6 | y3 +------------+------------+------------+------------+------------+------------ + 2015-01-01 | 2015-01-01 | 2015-01-01 | 2014-01-01 | 2015-01-01 | 2015-01-01 + 2015-07-01 | 2015-01-01 | 2015-01-01 | 2014-01-01 | 2015-01-01 | 2015-01-01 + 2016-01-01 | 2016-01-01 | 2015-01-01 | 2016-01-01 | 2015-01-01 | 2015-01-01 + 2016-07-01 | 2016-01-01 | 2016-07-01 | 2016-01-01 | 2015-01-01 | 2015-01-01 + 2017-01-01 | 2017-01-01 | 2016-07-01 | 2016-01-01 | 2015-01-01 | 2015-01-01 + 2017-07-01 | 2017-01-01 | 2016-07-01 | 2016-01-01 | 2017-07-01 | 2015-01-01 + 2018-01-01 | 2018-01-01 | 2018-01-01 | 2018-01-01 | 2017-07-01 | 2018-01-01 + 2018-07-01 | 2018-01-01 | 2018-01-01 | 2018-01-01 | 2017-07-01 | 2018-01-01 + 2019-01-01 | 2019-01-01 | 2018-01-01 | 2018-01-01 | 2017-07-01 | 2018-01-01 + 2019-07-01 | 2019-01-01 | 2019-07-01 | 2018-01-01 | 2017-07-01 | 2018-01-01 + 2020-01-01 | 2020-01-01 | 2019-07-01 | 2020-01-01 | 2020-01-01 | 2018-01-01 + 2020-07-01 | 2020-01-01 | 2019-07-01 | 2020-01-01 | 2020-01-01 | 2018-01-01 +(12 rows) + +-- N years / N years, M month buckets with given 'origin' +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 year', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y1, + to_char(timescaledb_experimental.time_bucket_ng('1 year 6 month', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y1m6, + to_char(timescaledb_experimental.time_bucket_ng('2 years', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y2, + to_char(timescaledb_experimental.time_bucket_ng('2 years 6 month', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y2m6, + to_char(timescaledb_experimental.time_bucket_ng('3 years', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y3 +FROM 
generate_series('2015-01-01' :: date, '2020-12-01', '6 month') AS ts, + unnest(array[ts :: date]) AS d; + d | y1 | y1m6 | y2 | y2m6 | y3 +------------+------------+------------+------------+------------+------------ + 2015-01-01 | 2014-06-01 | 2013-12-01 | 2014-06-01 | 2012-12-01 | 2012-06-01 + 2015-07-01 | 2015-06-01 | 2015-06-01 | 2014-06-01 | 2015-06-01 | 2015-06-01 + 2016-01-01 | 2015-06-01 | 2015-06-01 | 2014-06-01 | 2015-06-01 | 2015-06-01 + 2016-07-01 | 2016-06-01 | 2015-06-01 | 2016-06-01 | 2015-06-01 | 2015-06-01 + 2017-01-01 | 2016-06-01 | 2016-12-01 | 2016-06-01 | 2015-06-01 | 2015-06-01 + 2017-07-01 | 2017-06-01 | 2016-12-01 | 2016-06-01 | 2015-06-01 | 2015-06-01 + 2018-01-01 | 2017-06-01 | 2016-12-01 | 2016-06-01 | 2017-12-01 | 2015-06-01 + 2018-07-01 | 2018-06-01 | 2018-06-01 | 2018-06-01 | 2017-12-01 | 2018-06-01 + 2019-01-01 | 2018-06-01 | 2018-06-01 | 2018-06-01 | 2017-12-01 | 2018-06-01 + 2019-07-01 | 2019-06-01 | 2018-06-01 | 2018-06-01 | 2017-12-01 | 2018-06-01 + 2020-01-01 | 2019-06-01 | 2019-12-01 | 2018-06-01 | 2017-12-01 | 2018-06-01 + 2020-07-01 | 2020-06-01 | 2019-12-01 | 2020-06-01 | 2020-06-01 | 2018-06-01 +(12 rows) + +-- Test timezone support with different bucket sizes +BEGIN; +-- Timestamptz type is displayed in the session timezone. +-- To get consistent results during the test we temporarily set the session +-- timezone to a known one. +SET TIME ZONE '+00'; +-- Moscow is UTC+3 in the year 2021. Let's say you are dealing with a '1 day' bucket. +-- In order to calculate the beginning of the bucket you have to take LOCAL +-- Moscow time and throw away the time. You will get midnight. The new day +-- starts 3 hours EARLIER in Moscow than in the UTC+0 time zone, thus the resulting +-- timestamp will be 3 hours LESS than for UTC+0. 
+SELECT bs, tz, to_char(ts_out, 'YYYY-MM-DD HH24:MI:SS TZ') as res +FROM unnest(array['Europe/Moscow', 'UTC']) as tz, + unnest(array['12 hours', '1 day', '1 month', '4 months', '1 year']) as bs, + unnest(array['2021-07-12 12:34:56 Europe/Moscow' :: timestamptz]) as ts_in, + unnest(array[timescaledb_experimental.time_bucket_ng(bs :: interval, ts_in, timezone => tz)]) as ts_out +ORDER BY tz, bs :: interval; + bs | tz | res +----------+---------------+------------------------- + 12 hours | Europe/Moscow | 2021-07-12 09:00:00 +00 + 1 day | Europe/Moscow | 2021-07-11 21:00:00 +00 + 1 month | Europe/Moscow | 2021-06-30 21:00:00 +00 + 4 months | Europe/Moscow | 2021-04-30 21:00:00 +00 + 1 year | Europe/Moscow | 2020-12-31 21:00:00 +00 + 12 hours | UTC | 2021-07-12 00:00:00 +00 + 1 day | UTC | 2021-07-12 00:00:00 +00 + 1 month | UTC | 2021-07-01 00:00:00 +00 + 4 months | UTC | 2021-05-01 00:00:00 +00 + 1 year | UTC | 2021-01-01 00:00:00 +00 +(10 rows) + +-- Same as above, but with 'origin' +SELECT bs, tz, to_char(ts_out, 'YYYY-MM-DD HH24:MI:SS TZ') as res +FROM unnest(array['Europe/Moscow']) as tz, + unnest(array['12 hours', '1 day', '1 month', '4 months', '1 year']) as bs, + unnest(array['2021-07-12 12:34:56 Europe/Moscow' :: timestamptz]) as ts_in, + unnest(array['2021-06-01 00:00:00 Europe/Moscow' :: timestamptz]) as origin_in, + unnest(array[timescaledb_experimental.time_bucket_ng(bs :: interval, ts_in, origin => origin_in, timezone => tz)]) as ts_out +ORDER BY tz, bs :: interval; + bs | tz | res +----------+---------------+------------------------- + 12 hours | Europe/Moscow | 2021-07-12 09:00:00 +00 + 1 day | Europe/Moscow | 2021-07-11 21:00:00 +00 + 1 month | Europe/Moscow | 2021-06-30 21:00:00 +00 + 4 months | Europe/Moscow | 2021-05-31 21:00:00 +00 + 1 year | Europe/Moscow | 2021-05-31 21:00:00 +00 +(5 rows) + +-- Overwritten origin allows to work with dates earlier than the default origin +SELECT to_char(timescaledb_experimental.time_bucket_ng('1 day', '1999-01-01 12:34:56 MSK' :: timestamptz, origin => '1900-01-01 00:00:00 MSK', timezone => 'MSK'), 'YYYY-MM-DD HH24:MI:SS TZ'); + to_char +------------------------- + 1998-12-31 21:00:00 +00 +(1 row) + +-- Restore previously used time zone. +ROLLBACK; +------------------------------------- +--- Test time input functions -- +------------------------------------- +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE OR REPLACE FUNCTION test.interval_to_internal(coltype REGTYPE, value ANYELEMENT = NULL::BIGINT) RETURNS BIGINT +AS :MODULE_PATHNAME, 'ts_dimension_interval_to_internal_test' LANGUAGE C VOLATILE; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT test.interval_to_internal('TIMESTAMP'::regtype, INTERVAL '1 day'); + interval_to_internal +---------------------- + 86400000000 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMP'::regtype, 86400000000); + interval_to_internal +---------------------- + 86400000000 +(1 row) + +---should give warning +SELECT test.interval_to_internal('TIMESTAMP'::regtype, 86400); +WARNING: unexpected interval: smaller than one second +HINT: The interval is specified in microseconds. 
+ interval_to_internal +---------------------- + 86400 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMP'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +SELECT test.interval_to_internal('BIGINT'::regtype, 2147483649::bigint); + interval_to_internal +---------------------- + 2147483649 +(1 row) + +-- Default interval for integer is supported as part of +-- hypertable generalization +SELECT test.interval_to_internal('INT'::regtype); + interval_to_internal +---------------------- + 100000 +(1 row) + +SELECT test.interval_to_internal('SMALLINT'::regtype); + interval_to_internal +---------------------- + 10000 +(1 row) + +SELECT test.interval_to_internal('BIGINT'::regtype); + interval_to_internal +---------------------- + 1000000 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMPTZ'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMP'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +SELECT test.interval_to_internal('DATE'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +\set VERBOSITY terse +\set ON_ERROR_STOP 0 +SELECT test.interval_to_internal('INT'::regtype, 2147483649::bigint); +ERROR: invalid interval: must be between 1 and 2147483647 +SELECT test.interval_to_internal('SMALLINT'::regtype, 32768::bigint); +ERROR: invalid interval: must be between 1 and 32767 +SELECT test.interval_to_internal('TEXT'::regtype, 32768::bigint); +ERROR: invalid type for dimension "testcol" +SELECT test.interval_to_internal('INT'::regtype, INTERVAL '1 day'); +ERROR: invalid interval type for integer dimension +\set ON_ERROR_STOP 1 diff --git a/test/expected/timestamp-16.out b/test/expected/timestamp-16.out new file mode 100644 index 00000000000..a502453dfea --- /dev/null +++ b/test/expected/timestamp-16.out @@ -0,0 +1,2061 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- Utility function for grouping/slotting time with a given interval. 
+CREATE OR REPLACE FUNCTION date_group( + field timestamp, + group_interval interval +) + RETURNS timestamp LANGUAGE SQL STABLE AS +$BODY$ + SELECT to_timestamp((EXTRACT(EPOCH from $1)::int / + EXTRACT(EPOCH from group_interval)::int) * + EXTRACT(EPOCH from group_interval)::int)::timestamp; +$BODY$; +CREATE TABLE PUBLIC."testNs" ( + "timeCustom" TIMESTAMP NOT NULL, + device_id TEXT NOT NULL, + series_0 DOUBLE PRECISION NULL, + series_1 DOUBLE PRECISION NULL, + series_2 DOUBLE PRECISION NULL, + series_bool BOOLEAN NULL +); +CREATE INDEX ON PUBLIC."testNs" (device_id, "timeCustom" DESC NULLS LAST) WHERE device_id IS NOT NULL; +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE SCHEMA "testNs" AUTHORIZATION :ROLE_DEFAULT_PERM_USER; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT * FROM create_hypertable('"public"."testNs"', 'timeCustom', 'device_id', 2, associated_schema_name=>'testNs' ); +WARNING: column type "timestamp without time zone" used for "timeCustom" does not follow best practices + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 1 | public | testNs | t +(1 row) + +\c :TEST_DBNAME +INSERT INTO PUBLIC."testNs"("timeCustom", device_id, series_0, series_1) VALUES +('2009-11-12T01:00:00+00:00', 'dev1', 1.5, 1), +('2009-11-12T01:00:00+00:00', 'dev1', 1.5, 2), +('2009-11-10T23:00:02+00:00', 'dev1', 2.5, 3); +INSERT INTO PUBLIC."testNs"("timeCustom", device_id, series_0, series_1) VALUES +('2009-11-10T23:00:00+00:00', 'dev2', 1.5, 1), +('2009-11-10T23:00:00+00:00', 'dev2', 1.5, 2); +SELECT * FROM PUBLIC."testNs"; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +--------------------------+-----------+----------+----------+----------+------------- + Thu Nov 12 01:00:00 2009 | dev1 | 1.5 | 1 | | + Thu Nov 12 01:00:00 2009 | dev1 | 1.5 | 2 | | + Tue Nov 10 23:00:02 2009 | dev1 | 2.5 | 3 | | + Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 1 | | + Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 2 | | +(5 rows) + +SET client_min_messages = WARNING; +\echo 'The next 2 queries will differ in output between UTC and EST since the mod is on the 100th hour UTC' +The next 2 queries will differ in output between UTC and EST since the mod is on the 100th hour UTC +SET timezone = 'UTC'; +SELECT date_group("timeCustom", '100 days') AS time, sum(series_0) +FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC; + time | sum +--------------------------+----- + Sun Sep 13 00:00:00 2009 | 8.5 +(1 row) + +SET timezone = 'EST'; +SELECT date_group("timeCustom", '100 days') AS time, sum(series_0) +FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC; + time | sum +--------------------------+----- + Sat Sep 12 19:00:00 2009 | 8.5 +(1 row) + +\echo 'The rest of the queries will be the same in output between UTC and EST' +The rest of the queries will be the same in output between UTC and EST +SET timezone = 'UTC'; +SELECT date_group("timeCustom", '1 day') AS time, sum(series_0) +FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC; + time | sum +--------------------------+----- + Tue Nov 10 00:00:00 2009 | 5.5 + Thu Nov 12 00:00:00 2009 | 3 +(2 rows) + +SET timezone = 'EST'; +SELECT date_group("timeCustom", '1 day') AS time, sum(series_0) +FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC; + time | sum +--------------------------+----- + Mon Nov 09 19:00:00 2009 | 5.5 + Wed Nov 11 19:00:00 2009 | 3 +(2 rows) + +SET timezone = 'UTC'; +SELECT * +FROM PUBLIC."testNs" +WHERE "timeCustom" >= TIMESTAMP '2009-11-10T23:00:00' +AND "timeCustom" < TIMESTAMP 
'2009-11-12T01:00:00' ORDER BY "timeCustom" DESC, device_id, series_1; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +--------------------------+-----------+----------+----------+----------+------------- + Tue Nov 10 23:00:02 2009 | dev1 | 2.5 | 3 | | + Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 1 | | + Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 2 | | +(3 rows) + +SET timezone = 'EST'; +SELECT * +FROM PUBLIC."testNs" +WHERE "timeCustom" >= TIMESTAMP '2009-11-10T23:00:00' +AND "timeCustom" < TIMESTAMP '2009-11-12T01:00:00' ORDER BY "timeCustom" DESC, device_id, series_1; + timeCustom | device_id | series_0 | series_1 | series_2 | series_bool +--------------------------+-----------+----------+----------+----------+------------- + Tue Nov 10 23:00:02 2009 | dev1 | 2.5 | 3 | | + Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 1 | | + Tue Nov 10 23:00:00 2009 | dev2 | 1.5 | 2 | | +(3 rows) + +SET timezone = 'UTC'; +SELECT date_group("timeCustom", '1 day') AS time, sum(series_0) +FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC LIMIT 2; + time | sum +--------------------------+----- + Tue Nov 10 00:00:00 2009 | 5.5 + Thu Nov 12 00:00:00 2009 | 3 +(2 rows) + +SET timezone = 'EST'; +SELECT date_group("timeCustom", '1 day') AS time, sum(series_0) +FROM PUBLIC."testNs" GROUP BY time ORDER BY time ASC LIMIT 2; + time | sum +--------------------------+----- + Mon Nov 09 19:00:00 2009 | 5.5 + Wed Nov 11 19:00:00 2009 | 3 +(2 rows) + +------------------------------------ +-- Test time conversion functions -- +------------------------------------ +\set ON_ERROR_STOP 0 +SET timezone = 'UTC'; +-- Conversion to timestamp using Postgres built-in function taking +-- double. Gives inaccurate result on Postgres <= 9.6.2. Accurate on +-- Postgres >= 9.6.3. +SELECT to_timestamp(1486480176.236538); + to_timestamp +------------------------------------- + Tue Feb 07 15:09:36.236538 2017 UTC +(1 row) + +-- extension-specific version taking microsecond UNIX timestamp +SELECT _timescaledb_functions.to_timestamp(1486480176236538); + to_timestamp +------------------------------------- + Tue Feb 07 15:09:36.236538 2017 UTC +(1 row) + +-- Should be the inverse of the statement above. +SELECT _timescaledb_functions.to_unix_microseconds('2017-02-07 15:09:36.236538+00'); + to_unix_microseconds +---------------------- + 1486480176236538 +(1 row) + +-- For timestamps, BIGINT MAX represents +Infinity and BIGINT MIN +-- -Infinity. We keep this notion for UNIX epoch time: +SELECT _timescaledb_functions.to_unix_microseconds('+infinity'); + to_unix_microseconds +---------------------- + 9223372036854775807 +(1 row) + +SELECT _timescaledb_functions.to_timestamp(9223372036854775807); + to_timestamp +-------------- + infinity +(1 row) + +SELECT _timescaledb_functions.to_unix_microseconds('-infinity'); + to_unix_microseconds +---------------------- + -9223372036854775808 +(1 row) + +SELECT _timescaledb_functions.to_timestamp(-9223372036854775808); + to_timestamp +-------------- + -infinity +(1 row) + +-- In UNIX microseconds, the largest bigint value below infinity +-- (BIGINT MAX) is smaller than internal date upper bound and should +-- therefore be OK. 
Further, converting to the internal postgres epoch +-- cannot overflow a 64-bit INTEGER since the postgres epoch is at a +-- later date compared to the UNIX epoch, and is therefore represented +-- by a smaller number +SELECT _timescaledb_functions.to_timestamp(9223372036854775806); + to_timestamp +--------------------------------------- + Sun Jan 10 04:00:54.775806 294247 UTC +(1 row) + +-- Julian day zero is -210866803200000000 microseconds from UNIX epoch +SELECT _timescaledb_functions.to_timestamp(-210866803200000000); + to_timestamp +--------------------------------- + Mon Nov 24 00:00:00 4714 UTC BC +(1 row) + +\set VERBOSITY default +-- Going beyond Julian day zero should give out-of-range error +SELECT _timescaledb_functions.to_timestamp(-210866803200000001); +ERROR: timestamp out of range +-- Lower bound on date (should return the Julian day zero UNIX timestamp above) +SELECT _timescaledb_functions.to_unix_microseconds('4714-11-24 00:00:00+00 BC'); + to_unix_microseconds +---------------------- + -210866803200000000 +(1 row) + +-- Going beyond lower bound on date should return out-of-range +SELECT _timescaledb_functions.to_unix_microseconds('4714-11-23 23:59:59.999999+00 BC'); +ERROR: timestamp out of range: "4714-11-23 23:59:59.999999+00 BC" +LINE 1: ...ELECT _timescaledb_functions.to_unix_microseconds('4714-11-2... + ^ +-- The upper bound for Postgres TIMESTAMPTZ +SELECT timestamp '294276-12-31 23:59:59.999999+00'; + timestamp +----------------------------------- + Sun Dec 31 23:59:59.999999 294276 +(1 row) + +-- Going beyond the upper bound, should fail +SELECT timestamp '294276-12-31 23:59:59.999999+00' + interval '1 us'; +ERROR: timestamp out of range +-- Cannot represent the upper bound timestamp with a UNIX microsecond timestamp +-- since the Postgres epoch is at a later date than the UNIX epoch. +SELECT _timescaledb_functions.to_unix_microseconds('294276-12-31 23:59:59.999999+00'); +ERROR: timestamp out of range +-- Subtracting the difference between the two epochs (10957 days) should bring +-- us within range. +SELECT timestamp '294276-12-31 23:59:59.999999+00' - interval '10957 days'; + ?column? +----------------------------------- + Fri Jan 01 23:59:59.999999 294247 +(1 row) + +SELECT _timescaledb_functions.to_unix_microseconds('294247-01-01 23:59:59.999999'); + to_unix_microseconds +---------------------- + 9223371331199999999 +(1 row) + +-- Adding one microsecond should take us out-of-range again +SELECT timestamp '294247-01-01 23:59:59.999999' + interval '1 us'; + ?column? 
+---------------------------- + Sat Jan 02 00:00:00 294247 +(1 row) + +SELECT _timescaledb_functions.to_unix_microseconds(timestamp '294247-01-01 23:59:59.999999' + interval '1 us'); +ERROR: timestamp out of range +--no time_bucketing of dates not by integer # of days +SELECT time_bucket('1 hour', DATE '2012-01-01'); +ERROR: interval must not have sub-day precision +SELECT time_bucket('25 hour', DATE '2012-01-01'); +ERROR: interval must be a multiple of a day +\set ON_ERROR_STOP 1 +SELECT time_bucket(INTERVAL '1 day', TIMESTAMP '2011-01-02 01:01:01'); + time_bucket +-------------------------- + Sun Jan 02 00:00:00 2011 +(1 row) + +SELECT time, time_bucket(INTERVAL '2 day ', time) +FROM unnest(ARRAY[ + TIMESTAMP '2011-01-01 01:01:01', + TIMESTAMP '2011-01-02 01:01:01', + TIMESTAMP '2011-01-03 01:01:01', + TIMESTAMP '2011-01-04 01:01:01' + ]) AS time; + time | time_bucket +--------------------------+-------------------------- + Sat Jan 01 01:01:01 2011 | Sat Jan 01 00:00:00 2011 + Sun Jan 02 01:01:01 2011 | Sat Jan 01 00:00:00 2011 + Mon Jan 03 01:01:01 2011 | Mon Jan 03 00:00:00 2011 + Tue Jan 04 01:01:01 2011 | Mon Jan 03 00:00:00 2011 +(4 rows) + +SELECT int_def, time_bucket(int_def,TIMESTAMP '2011-01-02 01:01:01.111') +FROM unnest(ARRAY[ + INTERVAL '1 millisecond', + INTERVAL '1 second', + INTERVAL '1 minute', + INTERVAL '1 hour', + INTERVAL '1 day', + INTERVAL '2 millisecond', + INTERVAL '2 second', + INTERVAL '2 minute', + INTERVAL '2 hour', + INTERVAL '2 day' + ]) AS int_def; + int_def | time_bucket +--------------+------------------------------ + @ 0.001 secs | Sun Jan 02 01:01:01.111 2011 + @ 1 sec | Sun Jan 02 01:01:01 2011 + @ 1 min | Sun Jan 02 01:01:00 2011 + @ 1 hour | Sun Jan 02 01:00:00 2011 + @ 1 day | Sun Jan 02 00:00:00 2011 + @ 0.002 secs | Sun Jan 02 01:01:01.11 2011 + @ 2 secs | Sun Jan 02 01:01:00 2011 + @ 2 mins | Sun Jan 02 01:00:00 2011 + @ 2 hours | Sun Jan 02 00:00:00 2011 + @ 2 days | Sat Jan 01 00:00:00 2011 +(10 rows) + +\set ON_ERROR_STOP 0 +SELECT time_bucket(INTERVAL '1 year 1d',TIMESTAMP '2011-01-02 01:01:01.111'); +ERROR: month intervals cannot have day or time component +SELECT time_bucket(INTERVAL '1 month 1 minute',TIMESTAMP '2011-01-02 01:01:01.111'); +ERROR: month intervals cannot have day or time component +\set ON_ERROR_STOP 1 +SELECT time, time_bucket(INTERVAL '5 minute', time) +FROM unnest(ARRAY[ + TIMESTAMP '1970-01-01 00:59:59.999999', + TIMESTAMP '1970-01-01 01:01:00', + TIMESTAMP '1970-01-01 01:04:59.999999', + TIMESTAMP '1970-01-01 01:05:00' + ]) AS time; + time | time_bucket +---------------------------------+-------------------------- + Thu Jan 01 00:59:59.999999 1970 | Thu Jan 01 00:55:00 1970 + Thu Jan 01 01:01:00 1970 | Thu Jan 01 01:00:00 1970 + Thu Jan 01 01:04:59.999999 1970 | Thu Jan 01 01:00:00 1970 + Thu Jan 01 01:05:00 1970 | Thu Jan 01 01:05:00 1970 +(4 rows) + +SELECT time, time_bucket(INTERVAL '5 minute', time) +FROM unnest(ARRAY[ + TIMESTAMP '2011-01-02 01:04:59.999999', + TIMESTAMP '2011-01-02 01:05:00', + TIMESTAMP '2011-01-02 01:09:59.999999', + TIMESTAMP '2011-01-02 01:10:00' + ]) AS time; + time | time_bucket +---------------------------------+-------------------------- + Sun Jan 02 01:04:59.999999 2011 | Sun Jan 02 01:00:00 2011 + Sun Jan 02 01:05:00 2011 | Sun Jan 02 01:05:00 2011 + Sun Jan 02 01:09:59.999999 2011 | Sun Jan 02 01:05:00 2011 + Sun Jan 02 01:10:00 2011 | Sun Jan 02 01:10:00 2011 +(4 rows) + +--offset with interval +SELECT time, time_bucket(INTERVAL '5 minute', time , INTERVAL '2 minutes') +FROM 
unnest(ARRAY[ + TIMESTAMP '2011-01-02 01:01:59.999999', + TIMESTAMP '2011-01-02 01:02:00', + TIMESTAMP '2011-01-02 01:06:59.999999', + TIMESTAMP '2011-01-02 01:07:00' + ]) AS time; + time | time_bucket +---------------------------------+-------------------------- + Sun Jan 02 01:01:59.999999 2011 | Sun Jan 02 00:57:00 2011 + Sun Jan 02 01:02:00 2011 | Sun Jan 02 01:02:00 2011 + Sun Jan 02 01:06:59.999999 2011 | Sun Jan 02 01:02:00 2011 + Sun Jan 02 01:07:00 2011 | Sun Jan 02 01:07:00 2011 +(4 rows) + +SELECT time, time_bucket(INTERVAL '5 minute', time , - INTERVAL '2 minutes') +FROM unnest(ARRAY[ + TIMESTAMP '2011-01-02 01:02:59.999999', + TIMESTAMP '2011-01-02 01:03:00', + TIMESTAMP '2011-01-02 01:07:59.999999', + TIMESTAMP '2011-01-02 01:08:00' + ]) AS time; + time | time_bucket +---------------------------------+-------------------------- + Sun Jan 02 01:02:59.999999 2011 | Sun Jan 02 00:58:00 2011 + Sun Jan 02 01:03:00 2011 | Sun Jan 02 01:03:00 2011 + Sun Jan 02 01:07:59.999999 2011 | Sun Jan 02 01:03:00 2011 + Sun Jan 02 01:08:00 2011 | Sun Jan 02 01:08:00 2011 +(4 rows) + +--offset with infinity +-- timestamp +SELECT time, time_bucket(INTERVAL '1 week', time, INTERVAL '1 day') +FROM unnest(ARRAY[ + timestamp '-Infinity', + timestamp 'Infinity' + ]) AS time; + time | time_bucket +-----------+------------- + -infinity | -infinity + infinity | infinity +(2 rows) + +-- timestamptz +SELECT time, time_bucket(INTERVAL '1 week', time, INTERVAL '1 day') +FROM unnest(ARRAY[ + timestamp with time zone '-Infinity', + timestamp with time zone 'Infinity' + ]) AS time; + time | time_bucket +-----------+------------- + -infinity | -infinity + infinity | infinity +(2 rows) + +-- Date +SELECT date, time_bucket(INTERVAL '1 week', date, INTERVAL '1 day') +FROM unnest(ARRAY[ + date '-Infinity', + date 'Infinity' + ]) AS date; + date | time_bucket +-----------+------------- + -infinity | -infinity + infinity | infinity +(2 rows) + +--example to align with an origin +SELECT time, time_bucket(INTERVAL '5 minute', time - (TIMESTAMP '2011-01-02 00:02:00' - TIMESTAMP 'epoch')) + (TIMESTAMP '2011-01-02 00:02:00'-TIMESTAMP 'epoch') +FROM unnest(ARRAY[ + TIMESTAMP '2011-01-02 01:01:59.999999', + TIMESTAMP '2011-01-02 01:02:00', + TIMESTAMP '2011-01-02 01:06:59.999999', + TIMESTAMP '2011-01-02 01:07:00' + ]) AS time; + time | ?column? +---------------------------------+-------------------------- + Sun Jan 02 01:01:59.999999 2011 | Sun Jan 02 00:57:00 2011 + Sun Jan 02 01:02:00 2011 | Sun Jan 02 01:02:00 2011 + Sun Jan 02 01:06:59.999999 2011 | Sun Jan 02 01:02:00 2011 + Sun Jan 02 01:07:00 2011 | Sun Jan 02 01:07:00 2011 +(4 rows) + +--rounding version +SELECT time, time_bucket(INTERVAL '5 minute', time , - INTERVAL '2.5 minutes') + INTERVAL '2 minutes 30 seconds' +FROM unnest(ARRAY[ + TIMESTAMP '2011-01-02 01:05:01', + TIMESTAMP '2011-01-02 01:07:29', + TIMESTAMP '2011-01-02 01:02:30', + TIMESTAMP '2011-01-02 01:07:30', + TIMESTAMP '2011-01-02 01:02:29' + ]) AS time; + time | ?column? 
+--------------------------+-------------------------- + Sun Jan 02 01:05:01 2011 | Sun Jan 02 01:05:00 2011 + Sun Jan 02 01:07:29 2011 | Sun Jan 02 01:05:00 2011 + Sun Jan 02 01:02:30 2011 | Sun Jan 02 01:05:00 2011 + Sun Jan 02 01:07:30 2011 | Sun Jan 02 01:10:00 2011 + Sun Jan 02 01:02:29 2011 | Sun Jan 02 01:00:00 2011 +(5 rows) + +--time_bucket with timezone should mimick date_trunc +SET timezone TO 'UTC'; +SELECT time, time_bucket(INTERVAL '1 hour', time), date_trunc('hour', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01', + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+01', + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+02' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+------------------------------+------------------------------ + Sun Jan 02 01:01:01 2011 UTC | Sun Jan 02 01:00:00 2011 UTC | Sun Jan 02 01:00:00 2011 UTC + Sun Jan 02 00:01:01 2011 UTC | Sun Jan 02 00:00:00 2011 UTC | Sun Jan 02 00:00:00 2011 UTC + Sat Jan 01 23:01:01 2011 UTC | Sat Jan 01 23:00:00 2011 UTC | Sat Jan 01 23:00:00 2011 UTC +(3 rows) + +SELECT time, time_bucket(INTERVAL '1 day', time), date_trunc('day', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01', + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+01', + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+02' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+------------------------------+------------------------------ + Sun Jan 02 01:01:01 2011 UTC | Sun Jan 02 00:00:00 2011 UTC | Sun Jan 02 00:00:00 2011 UTC + Sun Jan 02 00:01:01 2011 UTC | Sun Jan 02 00:00:00 2011 UTC | Sun Jan 02 00:00:00 2011 UTC + Sat Jan 01 23:01:01 2011 UTC | Sat Jan 01 00:00:00 2011 UTC | Sat Jan 01 00:00:00 2011 UTC +(3 rows) + +--what happens with a local tz +SET timezone TO 'America/New_York'; +SELECT time, time_bucket(INTERVAL '1 hour', time), date_trunc('hour', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01', + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+01', + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01+02' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+------------------------------+------------------------------ + Sun Jan 02 01:01:01 2011 EST | Sun Jan 02 01:00:00 2011 EST | Sun Jan 02 01:00:00 2011 EST + Sat Jan 01 19:01:01 2011 EST | Sat Jan 01 19:00:00 2011 EST | Sat Jan 01 19:00:00 2011 EST + Sat Jan 01 18:01:01 2011 EST | Sat Jan 01 18:00:00 2011 EST | Sat Jan 01 18:00:00 2011 EST +(3 rows) + +--Note the timestamp tz input is aligned with UTC day /not/ local day. different than date_trunc. +SELECT time, time_bucket(INTERVAL '1 day', time), date_trunc('day', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01', + TIMESTAMP WITH TIME ZONE '2011-01-03 01:01:01+01', + TIMESTAMP WITH TIME ZONE '2011-01-04 01:01:01+02' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+------------------------------+------------------------------ + Sun Jan 02 01:01:01 2011 EST | Sat Jan 01 19:00:00 2011 EST | Sun Jan 02 00:00:00 2011 EST + Sun Jan 02 19:01:01 2011 EST | Sun Jan 02 19:00:00 2011 EST | Sun Jan 02 00:00:00 2011 EST + Mon Jan 03 18:01:01 2011 EST | Sun Jan 02 19:00:00 2011 EST | Mon Jan 03 00:00:00 2011 EST +(3 rows) + +--can force local bucketing with simple cast. 
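+-- Sketch (editorial, not generated output): the cast works because
+-- time::timestamp re-renders the value as session-local wall-clock time, so
+-- time_bucket aligns to local midnight; cast back if a timestamptz is needed:
+--   time_bucket(INTERVAL '1 day', time::timestamp)::timestamptz
+-- yields the local-midnight bucket as a timestamp with time zone again.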
+SELECT time, time_bucket(INTERVAL '1 day', time::timestamp), date_trunc('day', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01', + TIMESTAMP WITH TIME ZONE '2011-01-03 01:01:01+01', + TIMESTAMP WITH TIME ZONE '2011-01-04 01:01:01+02' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+--------------------------+------------------------------ + Sun Jan 02 01:01:01 2011 EST | Sun Jan 02 00:00:00 2011 | Sun Jan 02 00:00:00 2011 EST + Sun Jan 02 19:01:01 2011 EST | Sun Jan 02 00:00:00 2011 | Sun Jan 02 00:00:00 2011 EST + Mon Jan 03 18:01:01 2011 EST | Mon Jan 03 00:00:00 2011 | Mon Jan 03 00:00:00 2011 EST +(3 rows) + +--can also use interval to correct +SELECT time, time_bucket(INTERVAL '1 day', time, -INTERVAL '19 hours'), date_trunc('day', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2011-01-02 01:01:01', + TIMESTAMP WITH TIME ZONE '2011-01-03 01:01:01+01', + TIMESTAMP WITH TIME ZONE '2011-01-04 01:01:01+02' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+------------------------------+------------------------------ + Sun Jan 02 01:01:01 2011 EST | Sun Jan 02 00:00:00 2011 EST | Sun Jan 02 00:00:00 2011 EST + Sun Jan 02 19:01:01 2011 EST | Sun Jan 02 00:00:00 2011 EST | Sun Jan 02 00:00:00 2011 EST + Mon Jan 03 18:01:01 2011 EST | Mon Jan 03 00:00:00 2011 EST | Mon Jan 03 00:00:00 2011 EST +(3 rows) + +--dst: same local hour bucketed as two different hours. +SELECT time, time_bucket(INTERVAL '1 hour', time), date_trunc('hour', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2017-11-05 12:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 13:05:00+07' + ]) AS time; + time | time_bucket | date_trunc +------------------------------+------------------------------+------------------------------ + Sun Nov 05 01:05:00 2017 EDT | Sun Nov 05 01:00:00 2017 EDT | Sun Nov 05 01:00:00 2017 EDT + Sun Nov 05 01:05:00 2017 EST | Sun Nov 05 01:00:00 2017 EST | Sun Nov 05 01:00:00 2017 EST +(2 rows) + +--local alignment changes when bucketing by UTC across dst boundary +SELECT time, time_bucket(INTERVAL '2 hour', time) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2017-11-05 10:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 12:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 13:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 15:05:00+07' + ]) AS time; + time | time_bucket +------------------------------+------------------------------ + Sat Nov 04 23:05:00 2017 EDT | Sat Nov 04 22:00:00 2017 EDT + Sun Nov 05 01:05:00 2017 EDT | Sun Nov 05 00:00:00 2017 EDT + Sun Nov 05 01:05:00 2017 EST | Sun Nov 05 01:00:00 2017 EST + Sun Nov 05 03:05:00 2017 EST | Sun Nov 05 03:00:00 2017 EST +(4 rows) + +--local alignment is preserved when bucketing by local time across DST boundary. 
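+-- (Editorial note on the query below) Preserving local alignment means the
+-- buckets are not all equally long in absolute time: on 2017-11-05 the local
+-- [00:00, 02:00) bucket absorbs the repeated 01:00 hour and spans 3 real hours.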
+SELECT time, time_bucket(INTERVAL '2 hour', time::timestamp) +FROM unnest(ARRAY[ + TIMESTAMP WITH TIME ZONE '2017-11-05 10:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 12:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 13:05:00+07', + TIMESTAMP WITH TIME ZONE '2017-11-05 15:05:00+07' + ]) AS time; + time | time_bucket +------------------------------+-------------------------- + Sat Nov 04 23:05:00 2017 EDT | Sat Nov 04 22:00:00 2017 + Sun Nov 05 01:05:00 2017 EDT | Sun Nov 05 00:00:00 2017 + Sun Nov 05 01:05:00 2017 EST | Sun Nov 05 00:00:00 2017 + Sun Nov 05 03:05:00 2017 EST | Sun Nov 05 02:00:00 2017 +(4 rows) + +SELECT time, + time_bucket(10::smallint, time) AS time_bucket_smallint, + time_bucket(10::int, time) AS time_bucket_int, + time_bucket(10::bigint, time) AS time_bucket_bigint +FROM unnest(ARRAY[ + '-11', + '-10', + '-9', + '-1', + '0', + '1', + '99', + '100', + '109', + '110' + ]::smallint[]) AS time; + time | time_bucket_smallint | time_bucket_int | time_bucket_bigint +------+----------------------+-----------------+-------------------- + -11 | -20 | -20 | -20 + -10 | -10 | -10 | -10 + -9 | -10 | -10 | -10 + -1 | -10 | -10 | -10 + 0 | 0 | 0 | 0 + 1 | 0 | 0 | 0 + 99 | 90 | 90 | 90 + 100 | 100 | 100 | 100 + 109 | 100 | 100 | 100 + 110 | 110 | 110 | 110 +(10 rows) + +SELECT time, + time_bucket(10::smallint, time, 2::smallint) AS time_bucket_smallint, + time_bucket(10::int, time, 2::int) AS time_bucket_int, + time_bucket(10::bigint, time, 2::bigint) AS time_bucket_bigint +FROM unnest(ARRAY[ + '-9', + '-8', + '-7', + '1', + '2', + '3', + '101', + '102', + '111', + '112' + ]::smallint[]) AS time; + time | time_bucket_smallint | time_bucket_int | time_bucket_bigint +------+----------------------+-----------------+-------------------- + -9 | -18 | -18 | -18 + -8 | -8 | -8 | -8 + -7 | -8 | -8 | -8 + 1 | -8 | -8 | -8 + 2 | 2 | 2 | 2 + 3 | 2 | 2 | 2 + 101 | 92 | 92 | 92 + 102 | 102 | 102 | 102 + 111 | 102 | 102 | 102 + 112 | 112 | 112 | 112 +(10 rows) + +SELECT time, + time_bucket(10::smallint, time, -2::smallint) AS time_bucket_smallint, + time_bucket(10::int, time, -2::int) AS time_bucket_int, + time_bucket(10::bigint, time, -2::bigint) AS time_bucket_bigint +FROM unnest(ARRAY[ + '-13', + '-12', + '-11', + '-3', + '-2', + '-1', + '97', + '98', + '107', + '108' + ]::smallint[]) AS time; + time | time_bucket_smallint | time_bucket_int | time_bucket_bigint +------+----------------------+-----------------+-------------------- + -13 | -22 | -22 | -22 + -12 | -12 | -12 | -12 + -11 | -12 | -12 | -12 + -3 | -12 | -12 | -12 + -2 | -2 | -2 | -2 + -1 | -2 | -2 | -2 + 97 | 88 | 88 | 88 + 98 | 98 | 98 | 98 + 107 | 98 | 98 | 98 + 108 | 108 | 108 | 108 +(10 rows) + +\set ON_ERROR_STOP 0 +SELECT time_bucket(10::smallint, '-32768'::smallint); +ERROR: timestamp out of range +SELECT time_bucket(10::smallint, '-32761'::smallint); +ERROR: timestamp out of range +select time_bucket(10::smallint, '-32768'::smallint, 1000::smallint); +ERROR: timestamp out of range +select time_bucket(10::smallint, '-32768'::smallint, '32767'::smallint); +ERROR: timestamp out of range +select time_bucket(10::smallint, '32767'::smallint, '-32768'::smallint); +ERROR: timestamp out of range +\set ON_ERROR_STOP 1 +SELECT time, time_bucket(10::smallint, time) +FROM unnest(ARRAY[ + '-32760', + '-32759', + '32767' + ]::smallint[]) AS time; + time | time_bucket +--------+------------- + -32760 | -32760 + -32759 | -32760 + 32767 | 32760 +(3 rows) + +\set ON_ERROR_STOP 0 +SELECT time_bucket(10::int, '-2147483648'::int); +ERROR: 
timestamp out of range +SELECT time_bucket(10::int, '-2147483641'::int); +ERROR: timestamp out of range +SELECT time_bucket(1000::int, '-2147483000'::int, 1::int); +ERROR: timestamp out of range +SELECT time_bucket(1000::int, '-2147483648'::int, '2147483647'::int); +ERROR: timestamp out of range +SELECT time_bucket(1000::int, '2147483647'::int, '-2147483648'::int); +ERROR: timestamp out of range +\set ON_ERROR_STOP 1 +SELECT time, time_bucket(10::int, time) +FROM unnest(ARRAY[ + '-2147483640', + '-2147483639', + '2147483647' + ]::int[]) AS time; + time | time_bucket +-------------+------------- + -2147483640 | -2147483640 + -2147483639 | -2147483640 + 2147483647 | 2147483640 +(3 rows) + +\set ON_ERROR_STOP 0 +SELECT time_bucket(10::bigint, '-9223372036854775808'::bigint); +ERROR: timestamp out of range +SELECT time_bucket(10::bigint, '-9223372036854775801'::bigint); +ERROR: timestamp out of range +SELECT time_bucket(1000::bigint, '-9223372036854775000'::bigint, 1::bigint); +ERROR: timestamp out of range +SELECT time_bucket(1000::bigint, '-9223372036854775808'::bigint, '9223372036854775807'::bigint); +ERROR: timestamp out of range +SELECT time_bucket(1000::bigint, '9223372036854775807'::bigint, '-9223372036854775808'::bigint); +ERROR: timestamp out of range +\set ON_ERROR_STOP 1 +SELECT time, time_bucket(10::bigint, time) +FROM unnest(ARRAY[ + '-9223372036854775800', + '-9223372036854775799', + '9223372036854775807' + ]::bigint[]) AS time; + time | time_bucket +----------------------+---------------------- + -9223372036854775800 | -9223372036854775800 + -9223372036854775799 | -9223372036854775800 + 9223372036854775807 | 9223372036854775800 +(3 rows) + +SELECT time, time_bucket(INTERVAL '1 day', time::date) +FROM unnest(ARRAY[ + date '2017-11-05', + date '2017-11-06' + ]) AS time; + time | time_bucket +------------+------------- + 11-05-2017 | 11-05-2017 + 11-06-2017 | 11-06-2017 +(2 rows) + +SELECT time, time_bucket(INTERVAL '4 day', time::date) +FROM unnest(ARRAY[ + date '2017-11-04', + date '2017-11-05', + date '2017-11-08', + date '2017-11-09' + ]) AS time; + time | time_bucket +------------+------------- + 11-04-2017 | 11-01-2017 + 11-05-2017 | 11-05-2017 + 11-08-2017 | 11-05-2017 + 11-09-2017 | 11-09-2017 +(4 rows) + +SELECT time, time_bucket(INTERVAL '4 day', time::date, INTERVAL '2 day') +FROM unnest(ARRAY[ + date '2017-11-06', + date '2017-11-07', + date '2017-11-10', + date '2017-11-11' + ]) AS time; + time | time_bucket +------------+------------- + 11-06-2017 | 11-03-2017 + 11-07-2017 | 11-07-2017 + 11-10-2017 | 11-07-2017 + 11-11-2017 | 11-11-2017 +(4 rows) + +-- 2019-09-24 is a Monday, and we want to ensure that time_bucket returns the week starting with a Monday as date_trunc does, +-- Rather than a Saturday which is the date of the PostgreSQL epoch +SELECT time, time_bucket(INTERVAL '1 week', time::date) +FROM unnest(ARRAY[ + date '2018-09-16', + date '2018-09-17', + date '2018-09-23', + date '2018-09-24' + ]) AS time; + time | time_bucket +------------+------------- + 09-16-2018 | 09-10-2018 + 09-17-2018 | 09-17-2018 + 09-23-2018 | 09-17-2018 + 09-24-2018 | 09-24-2018 +(4 rows) + +SELECT time, time_bucket(INTERVAL '1 week', time) +FROM unnest(ARRAY[ + timestamp without time zone '2018-09-16', + timestamp without time zone '2018-09-17', + timestamp without time zone '2018-09-23', + timestamp without time zone '2018-09-24' + ]) AS time; + time | time_bucket +--------------------------+-------------------------- + Sun Sep 16 00:00:00 2018 | Mon Sep 10 00:00:00 2018 + Mon Sep 17 
00:00:00 2018 | Mon Sep 17 00:00:00 2018 + Sun Sep 23 00:00:00 2018 | Mon Sep 17 00:00:00 2018 + Mon Sep 24 00:00:00 2018 | Mon Sep 24 00:00:00 2018 +(4 rows) + +SELECT time, time_bucket(INTERVAL '1 week', time) +FROM unnest(ARRAY[ + timestamp with time zone '2018-09-16', + timestamp with time zone '2018-09-17', + timestamp with time zone '2018-09-23', + timestamp with time zone '2018-09-24' + ]) AS time; + time | time_bucket +------------------------------+------------------------------ + Sun Sep 16 00:00:00 2018 EDT | Sun Sep 09 20:00:00 2018 EDT + Mon Sep 17 00:00:00 2018 EDT | Sun Sep 16 20:00:00 2018 EDT + Sun Sep 23 00:00:00 2018 EDT | Sun Sep 16 20:00:00 2018 EDT + Mon Sep 24 00:00:00 2018 EDT | Sun Sep 23 20:00:00 2018 EDT +(4 rows) + +SELECT time, time_bucket(INTERVAL '1 week', time) +FROM unnest(ARRAY[ + timestamp with time zone '-Infinity', + timestamp with time zone 'Infinity' + ]) AS time; + time | time_bucket +-----------+------------- + -infinity | -infinity + infinity | infinity +(2 rows) + +SELECT time, time_bucket(INTERVAL '1 week', time) +FROM unnest(ARRAY[ + timestamp without time zone '-Infinity', + timestamp without time zone 'Infinity' + ]) AS time; + time | time_bucket +-----------+------------- + -infinity | -infinity + infinity | infinity +(2 rows) + +SELECT time, time_bucket(INTERVAL '1 week', time), date_trunc('week', time) = time_bucket(INTERVAL '1 week', time) +FROM unnest(ARRAY[ + timestamp without time zone '4714-11-24 01:01:01.0 BC', + timestamp without time zone '294276-12-31 23:59:59.9999' + ]) AS time; + time | time_bucket | ?column? +---------------------------------+-----------------------------+---------- + Mon Nov 24 01:01:01 4714 BC | Mon Nov 24 00:00:00 4714 BC | t + Sun Dec 31 23:59:59.9999 294276 | Mon Dec 25 00:00:00 294276 | t +(2 rows) + +--1000 years later weeks still align. +SELECT time, time_bucket(INTERVAL '1 week', time), date_trunc('week', time) = time_bucket(INTERVAL '1 week', time) +FROM unnest(ARRAY[ + timestamp without time zone '3018-09-14', + timestamp without time zone '3018-09-20', + timestamp without time zone '3018-09-21', + timestamp without time zone '3018-09-22' + ]) AS time; + time | time_bucket | ?column? +--------------------------+--------------------------+---------- + Mon Sep 14 00:00:00 3018 | Mon Sep 14 00:00:00 3018 | t + Sun Sep 20 00:00:00 3018 | Mon Sep 14 00:00:00 3018 | t + Mon Sep 21 00:00:00 3018 | Mon Sep 21 00:00:00 3018 | t + Tue Sep 22 00:00:00 3018 | Mon Sep 21 00:00:00 3018 | t +(4 rows) + +--weeks align for timestamptz as well if cast to local time, (but not if done at UTC). +SELECT time, date_trunc('week', time) = time_bucket(INTERVAL '1 week', time), date_trunc('week', time) = time_bucket(INTERVAL '1 week', time::timestamp) +FROM unnest(ARRAY[ + timestamp with time zone '3018-09-14', + timestamp with time zone '3018-09-20', + timestamp with time zone '3018-09-21', + timestamp with time zone '3018-09-22' + ]) AS time; + time | ?column? | ?column? 
+------------------------------+----------+---------- + Mon Sep 14 00:00:00 3018 EDT | f | t + Sun Sep 20 00:00:00 3018 EDT | f | t + Mon Sep 21 00:00:00 3018 EDT | f | t + Tue Sep 22 00:00:00 3018 EDT | f | t +(4 rows) + +--check functions with origin +--note that the default origin is at 0 UTC, using origin parameter it is easy to provide a EDT origin point +\x +SELECT time, time_bucket(INTERVAL '1 week', time) no_epoch, + time_bucket(INTERVAL '1 week', time::timestamp) no_epoch_local, + time_bucket(INTERVAL '1 week', time) = time_bucket(INTERVAL '1 week', time, timestamptz '2000-01-03 00:00:00+0') always_true, + time_bucket(INTERVAL '1 week', time, timestamptz '2000-01-01 00:00:00+0') pg_epoch, + time_bucket(INTERVAL '1 week', time, timestamptz 'epoch') unix_epoch, + time_bucket(INTERVAL '1 week', time, timestamptz '3018-09-13') custom_1, + time_bucket(INTERVAL '1 week', time, timestamptz '3018-09-14') custom_2 +FROM unnest(ARRAY[ + timestamp with time zone '2000-01-01 00:00:00+0'- interval '1 second', + timestamp with time zone '2000-01-01 00:00:00+0', + timestamp with time zone '2000-01-03 00:00:00+0'- interval '1 second', + timestamp with time zone '2000-01-03 00:00:00+0', + timestamp with time zone '2000-01-01', + timestamp with time zone '2000-01-02', + timestamp with time zone '2000-01-03', + timestamp with time zone '3018-09-12', + timestamp with time zone '3018-09-13', + timestamp with time zone '3018-09-14', + timestamp with time zone '3018-09-15' + ]) AS time; +-[ RECORD 1 ]--+----------------------------- +time | Fri Dec 31 18:59:59 1999 EST +no_epoch | Sun Dec 26 19:00:00 1999 EST +no_epoch_local | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Fri Dec 24 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Dec 25 23:00:00 1999 EST +custom_2 | Sun Dec 26 23:00:00 1999 EST +-[ RECORD 2 ]--+----------------------------- +time | Fri Dec 31 19:00:00 1999 EST +no_epoch | Sun Dec 26 19:00:00 1999 EST +no_epoch_local | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Fri Dec 31 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Dec 25 23:00:00 1999 EST +custom_2 | Sun Dec 26 23:00:00 1999 EST +-[ RECORD 3 ]--+----------------------------- +time | Sun Jan 02 18:59:59 2000 EST +no_epoch | Sun Dec 26 19:00:00 1999 EST +no_epoch_local | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Fri Dec 31 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Jan 01 23:00:00 2000 EST +custom_2 | Sun Dec 26 23:00:00 1999 EST +-[ RECORD 4 ]--+----------------------------- +time | Sun Jan 02 19:00:00 2000 EST +no_epoch | Sun Jan 02 19:00:00 2000 EST +no_epoch_local | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Fri Dec 31 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Jan 01 23:00:00 2000 EST +custom_2 | Sun Dec 26 23:00:00 1999 EST +-[ RECORD 5 ]--+----------------------------- +time | Sat Jan 01 00:00:00 2000 EST +no_epoch | Sun Dec 26 19:00:00 1999 EST +no_epoch_local | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Fri Dec 31 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Dec 25 23:00:00 1999 EST +custom_2 | Sun Dec 26 23:00:00 1999 EST +-[ RECORD 6 ]--+----------------------------- +time | Sun Jan 02 00:00:00 2000 EST +no_epoch | Sun Dec 26 19:00:00 1999 EST +no_epoch_local | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Fri Dec 31 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Jan 01 23:00:00 
2000 EST +custom_2 | Sun Dec 26 23:00:00 1999 EST +-[ RECORD 7 ]--+----------------------------- +time | Mon Jan 03 00:00:00 2000 EST +no_epoch | Sun Jan 02 19:00:00 2000 EST +no_epoch_local | Mon Jan 03 00:00:00 2000 +always_true | t +pg_epoch | Fri Dec 31 19:00:00 1999 EST +unix_epoch | Wed Dec 29 19:00:00 1999 EST +custom_1 | Sat Jan 01 23:00:00 2000 EST +custom_2 | Sun Jan 02 23:00:00 2000 EST +-[ RECORD 8 ]--+----------------------------- +time | Sat Sep 12 00:00:00 3018 EDT +no_epoch | Sun Sep 06 20:00:00 3018 EDT +no_epoch_local | Mon Sep 07 00:00:00 3018 +always_true | t +pg_epoch | Fri Sep 11 20:00:00 3018 EDT +unix_epoch | Wed Sep 09 20:00:00 3018 EDT +custom_1 | Sun Sep 06 00:00:00 3018 EDT +custom_2 | Mon Sep 07 00:00:00 3018 EDT +-[ RECORD 9 ]--+----------------------------- +time | Sun Sep 13 00:00:00 3018 EDT +no_epoch | Sun Sep 06 20:00:00 3018 EDT +no_epoch_local | Mon Sep 07 00:00:00 3018 +always_true | t +pg_epoch | Fri Sep 11 20:00:00 3018 EDT +unix_epoch | Wed Sep 09 20:00:00 3018 EDT +custom_1 | Sun Sep 13 00:00:00 3018 EDT +custom_2 | Mon Sep 07 00:00:00 3018 EDT +-[ RECORD 10 ]-+----------------------------- +time | Mon Sep 14 00:00:00 3018 EDT +no_epoch | Sun Sep 13 20:00:00 3018 EDT +no_epoch_local | Mon Sep 14 00:00:00 3018 +always_true | t +pg_epoch | Fri Sep 11 20:00:00 3018 EDT +unix_epoch | Wed Sep 09 20:00:00 3018 EDT +custom_1 | Sun Sep 13 00:00:00 3018 EDT +custom_2 | Mon Sep 14 00:00:00 3018 EDT +-[ RECORD 11 ]-+----------------------------- +time | Tue Sep 15 00:00:00 3018 EDT +no_epoch | Sun Sep 13 20:00:00 3018 EDT +no_epoch_local | Mon Sep 14 00:00:00 3018 +always_true | t +pg_epoch | Fri Sep 11 20:00:00 3018 EDT +unix_epoch | Wed Sep 09 20:00:00 3018 EDT +custom_1 | Sun Sep 13 00:00:00 3018 EDT +custom_2 | Mon Sep 14 00:00:00 3018 EDT + +SELECT time, time_bucket(INTERVAL '1 week', time) no_epoch, + time_bucket(INTERVAL '1 week', time) = time_bucket(INTERVAL '1 week', time, timestamp '2000-01-03 00:00:00') always_true, + time_bucket(INTERVAL '1 week', time, timestamp '2000-01-01 00:00:00+0') pg_epoch, + time_bucket(INTERVAL '1 week', time, timestamp 'epoch') unix_epoch, + time_bucket(INTERVAL '1 week', time, timestamp '3018-09-13') custom_1, + time_bucket(INTERVAL '1 week', time, timestamp '3018-09-14') custom_2 +FROM unnest(ARRAY[ + timestamp without time zone '2000-01-01 00:00:00'- interval '1 second', + timestamp without time zone '2000-01-01 00:00:00', + timestamp without time zone '2000-01-03 00:00:00'- interval '1 second', + timestamp without time zone '2000-01-03 00:00:00', + timestamp without time zone '2000-01-01', + timestamp without time zone '2000-01-02', + timestamp without time zone '2000-01-03', + timestamp without time zone '3018-09-12', + timestamp without time zone '3018-09-13', + timestamp without time zone '3018-09-14', + timestamp without time zone '3018-09-15' + ]) AS time; +-[ RECORD 1 ]------------------------- +time | Fri Dec 31 23:59:59 1999 +no_epoch | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Sat Dec 25 00:00:00 1999 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Dec 26 00:00:00 1999 +custom_2 | Mon Dec 27 00:00:00 1999 +-[ RECORD 2 ]------------------------- +time | Sat Jan 01 00:00:00 2000 +no_epoch | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Sat Jan 01 00:00:00 2000 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Dec 26 00:00:00 1999 +custom_2 | Mon Dec 27 00:00:00 1999 +-[ RECORD 3 ]------------------------- +time | Sun Jan 02 23:59:59 2000 +no_epoch | Mon Dec 27 00:00:00 1999 
+always_true | t +pg_epoch | Sat Jan 01 00:00:00 2000 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Jan 02 00:00:00 2000 +custom_2 | Mon Dec 27 00:00:00 1999 +-[ RECORD 4 ]------------------------- +time | Mon Jan 03 00:00:00 2000 +no_epoch | Mon Jan 03 00:00:00 2000 +always_true | t +pg_epoch | Sat Jan 01 00:00:00 2000 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Jan 02 00:00:00 2000 +custom_2 | Mon Jan 03 00:00:00 2000 +-[ RECORD 5 ]------------------------- +time | Sat Jan 01 00:00:00 2000 +no_epoch | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Sat Jan 01 00:00:00 2000 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Dec 26 00:00:00 1999 +custom_2 | Mon Dec 27 00:00:00 1999 +-[ RECORD 6 ]------------------------- +time | Sun Jan 02 00:00:00 2000 +no_epoch | Mon Dec 27 00:00:00 1999 +always_true | t +pg_epoch | Sat Jan 01 00:00:00 2000 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Jan 02 00:00:00 2000 +custom_2 | Mon Dec 27 00:00:00 1999 +-[ RECORD 7 ]------------------------- +time | Mon Jan 03 00:00:00 2000 +no_epoch | Mon Jan 03 00:00:00 2000 +always_true | t +pg_epoch | Sat Jan 01 00:00:00 2000 +unix_epoch | Thu Dec 30 00:00:00 1999 +custom_1 | Sun Jan 02 00:00:00 2000 +custom_2 | Mon Jan 03 00:00:00 2000 +-[ RECORD 8 ]------------------------- +time | Sat Sep 12 00:00:00 3018 +no_epoch | Mon Sep 07 00:00:00 3018 +always_true | t +pg_epoch | Sat Sep 12 00:00:00 3018 +unix_epoch | Thu Sep 10 00:00:00 3018 +custom_1 | Sun Sep 06 00:00:00 3018 +custom_2 | Mon Sep 07 00:00:00 3018 +-[ RECORD 9 ]------------------------- +time | Sun Sep 13 00:00:00 3018 +no_epoch | Mon Sep 07 00:00:00 3018 +always_true | t +pg_epoch | Sat Sep 12 00:00:00 3018 +unix_epoch | Thu Sep 10 00:00:00 3018 +custom_1 | Sun Sep 13 00:00:00 3018 +custom_2 | Mon Sep 07 00:00:00 3018 +-[ RECORD 10 ]------------------------ +time | Mon Sep 14 00:00:00 3018 +no_epoch | Mon Sep 14 00:00:00 3018 +always_true | t +pg_epoch | Sat Sep 12 00:00:00 3018 +unix_epoch | Thu Sep 10 00:00:00 3018 +custom_1 | Sun Sep 13 00:00:00 3018 +custom_2 | Mon Sep 14 00:00:00 3018 +-[ RECORD 11 ]------------------------ +time | Tue Sep 15 00:00:00 3018 +no_epoch | Mon Sep 14 00:00:00 3018 +always_true | t +pg_epoch | Sat Sep 12 00:00:00 3018 +unix_epoch | Thu Sep 10 00:00:00 3018 +custom_1 | Sun Sep 13 00:00:00 3018 +custom_2 | Mon Sep 14 00:00:00 3018 + +SELECT time, time_bucket(INTERVAL '1 week', time) no_epoch, + time_bucket(INTERVAL '1 week', time) = time_bucket(INTERVAL '1 week', time, date '2000-01-03') always_true, + time_bucket(INTERVAL '1 week', time, date '2000-01-01') pg_epoch, + time_bucket(INTERVAL '1 week', time, (timestamp 'epoch')::date) unix_epoch, + time_bucket(INTERVAL '1 week', time, date '3018-09-13') custom_1, + time_bucket(INTERVAL '1 week', time, date '3018-09-14') custom_2 +FROM unnest(ARRAY[ + date '1999-12-31', + date '2000-01-01', + date '2000-01-02', + date '2000-01-03', + date '3018-09-12', + date '3018-09-13', + date '3018-09-14', + date '3018-09-15' + ]) AS time; +-[ RECORD 1 ]----------- +time | 12-31-1999 +no_epoch | 12-27-1999 +always_true | t +pg_epoch | 12-25-1999 +unix_epoch | 12-30-1999 +custom_1 | 12-26-1999 +custom_2 | 12-27-1999 +-[ RECORD 2 ]----------- +time | 01-01-2000 +no_epoch | 12-27-1999 +always_true | t +pg_epoch | 01-01-2000 +unix_epoch | 12-30-1999 +custom_1 | 12-26-1999 +custom_2 | 12-27-1999 +-[ RECORD 3 ]----------- +time | 01-02-2000 +no_epoch | 12-27-1999 +always_true | t +pg_epoch | 01-01-2000 +unix_epoch | 12-30-1999 +custom_1 | 01-02-2000 
+custom_2 | 12-27-1999 +-[ RECORD 4 ]----------- +time | 01-03-2000 +no_epoch | 01-03-2000 +always_true | t +pg_epoch | 01-01-2000 +unix_epoch | 12-30-1999 +custom_1 | 01-02-2000 +custom_2 | 01-03-2000 +-[ RECORD 5 ]----------- +time | 09-12-3018 +no_epoch | 09-07-3018 +always_true | t +pg_epoch | 09-12-3018 +unix_epoch | 09-10-3018 +custom_1 | 09-06-3018 +custom_2 | 09-07-3018 +-[ RECORD 6 ]----------- +time | 09-13-3018 +no_epoch | 09-07-3018 +always_true | t +pg_epoch | 09-12-3018 +unix_epoch | 09-10-3018 +custom_1 | 09-13-3018 +custom_2 | 09-07-3018 +-[ RECORD 7 ]----------- +time | 09-14-3018 +no_epoch | 09-14-3018 +always_true | t +pg_epoch | 09-12-3018 +unix_epoch | 09-10-3018 +custom_1 | 09-13-3018 +custom_2 | 09-14-3018 +-[ RECORD 8 ]----------- +time | 09-15-3018 +no_epoch | 09-14-3018 +always_true | t +pg_epoch | 09-12-3018 +unix_epoch | 09-10-3018 +custom_1 | 09-13-3018 +custom_2 | 09-14-3018 + +\x +--really old origin works if date around that time +SELECT time, time_bucket(INTERVAL '1 week', time, timestamp without time zone '4710-11-24 01:01:01.0 BC') +FROM unnest(ARRAY[ + timestamp without time zone '4710-11-24 01:01:01.0 BC', + timestamp without time zone '4710-11-25 01:01:01.0 BC', + timestamp without time zone '2001-01-01', + timestamp without time zone '3001-01-01' + ]) AS time; + time | time_bucket +-----------------------------+----------------------------- + Sat Nov 24 01:01:01 4710 BC | Sat Nov 24 01:01:01 4710 BC + Sun Nov 25 01:01:01 4710 BC | Sat Nov 24 01:01:01 4710 BC + Mon Jan 01 00:00:00 2001 | Sat Dec 30 01:01:01 2000 + Thu Jan 01 00:00:00 3001 | Sat Dec 27 01:01:01 3000 +(4 rows) + +SELECT time, time_bucket(INTERVAL '1 week', time, timestamp without time zone '294270-12-30 23:59:59.9999') +FROM unnest(ARRAY[ + timestamp without time zone '294270-12-29 23:59:59.9999', + timestamp without time zone '294270-12-30 23:59:59.9999', + timestamp without time zone '294270-12-31 23:59:59.9999', + timestamp without time zone '2001-01-01', + timestamp without time zone '3001-01-01' + ]) AS time; + time | time_bucket +---------------------------------+--------------------------------- + Thu Dec 29 23:59:59.9999 294270 | Fri Dec 23 23:59:59.9999 294270 + Fri Dec 30 23:59:59.9999 294270 | Fri Dec 30 23:59:59.9999 294270 + Sat Dec 31 23:59:59.9999 294270 | Fri Dec 30 23:59:59.9999 294270 + Mon Jan 01 00:00:00 2001 | Fri Dec 29 23:59:59.9999 2000 + Thu Jan 01 00:00:00 3001 | Fri Dec 26 23:59:59.9999 3000 +(5 rows) + +\set ON_ERROR_STOP 0 +--really old origin + very new data + long period errors +SELECT time, time_bucket(INTERVAL '100000 day', time, timestamp without time zone '4710-11-24 01:01:01.0 BC') +FROM unnest(ARRAY[ + timestamp without time zone '294270-12-31 23:59:59.9999' + ]) AS time; +ERROR: timestamp out of range +SELECT time, time_bucket(INTERVAL '100000 day', time, timestamp with time zone '4710-11-25 01:01:01.0 BC') +FROM unnest(ARRAY[ + timestamp with time zone '294270-12-30 23:59:59.9999' + ]) AS time; +ERROR: timestamp out of range +--really high origin + old data + long period errors out +SELECT time, time_bucket(INTERVAL '10000000 day', time, timestamp without time zone '294270-12-31 23:59:59.9999') +FROM unnest(ARRAY[ + timestamp without time zone '4710-11-24 01:01:01.0 BC' + ]) AS time; +ERROR: timestamp out of range +SELECT time, time_bucket(INTERVAL '10000000 day', time, timestamp with time zone '294270-12-31 23:59:59.9999') +FROM unnest(ARRAY[ + timestamp with time zone '4710-11-24 01:01:01.0 BC' + ]) AS time; +ERROR: timestamp out of range +\set 
ON_ERROR_STOP 1 +------------------------------------------- +--- Test time_bucket with month periods --- +------------------------------------------- +SET datestyle TO ISO; +SELECT + time::date, + time_bucket('1 month', time::date) AS "1m", + time_bucket('2 month', time::date) AS "2m", + time_bucket('3 month', time::date) AS "3m", + time_bucket('1 month', time::date, '2000-02-01'::date) AS "1m origin", + time_bucket('2 month', time::date, '2000-02-01'::date) AS "2m origin", + time_bucket('3 month', time::date, '2000-02-01'::date) AS "3m origin" +FROM generate_series('1990-01-03'::date,'1990-06-03'::date,'1month'::interval) time; + time | 1m | 2m | 3m | 1m origin | 2m origin | 3m origin +------------+------------+------------+------------+------------+------------+------------ + 1990-01-03 | 1990-01-01 | 1990-01-01 | 1990-01-01 | 1990-01-01 | 1989-12-01 | 1989-11-01 + 1990-02-03 | 1990-02-01 | 1990-01-01 | 1990-01-01 | 1990-02-01 | 1990-02-01 | 1990-02-01 + 1990-03-03 | 1990-03-01 | 1990-03-01 | 1990-01-01 | 1990-03-01 | 1990-02-01 | 1990-02-01 + 1990-04-03 | 1990-04-01 | 1990-03-01 | 1990-04-01 | 1990-04-01 | 1990-04-01 | 1990-02-01 + 1990-05-03 | 1990-05-01 | 1990-05-01 | 1990-04-01 | 1990-05-01 | 1990-04-01 | 1990-05-01 + 1990-06-03 | 1990-06-01 | 1990-05-01 | 1990-04-01 | 1990-06-01 | 1990-06-01 | 1990-05-01 +(6 rows) + +SELECT + time, + time_bucket('1 month', time) AS "1m", + time_bucket('2 month', time) AS "2m", + time_bucket('3 month', time) AS "3m", + time_bucket('1 month', time, '2000-02-01'::timestamp) AS "1m origin", + time_bucket('2 month', time, '2000-02-01'::timestamp) AS "2m origin", + time_bucket('3 month', time, '2000-02-01'::timestamp) AS "3m origin" +FROM generate_series('1990-01-03'::timestamp,'1990-06-03'::timestamp,'1month'::interval) time; + time | 1m | 2m | 3m | 1m origin | 2m origin | 3m origin +---------------------+---------------------+---------------------+---------------------+---------------------+---------------------+--------------------- + 1990-01-03 00:00:00 | 1990-01-01 00:00:00 | 1990-01-01 00:00:00 | 1990-01-01 00:00:00 | 1990-01-01 00:00:00 | 1989-12-01 00:00:00 | 1989-11-01 00:00:00 + 1990-02-03 00:00:00 | 1990-02-01 00:00:00 | 1990-01-01 00:00:00 | 1990-01-01 00:00:00 | 1990-02-01 00:00:00 | 1990-02-01 00:00:00 | 1990-02-01 00:00:00 + 1990-03-03 00:00:00 | 1990-03-01 00:00:00 | 1990-03-01 00:00:00 | 1990-01-01 00:00:00 | 1990-03-01 00:00:00 | 1990-02-01 00:00:00 | 1990-02-01 00:00:00 + 1990-04-03 00:00:00 | 1990-04-01 00:00:00 | 1990-03-01 00:00:00 | 1990-04-01 00:00:00 | 1990-04-01 00:00:00 | 1990-04-01 00:00:00 | 1990-02-01 00:00:00 + 1990-05-03 00:00:00 | 1990-05-01 00:00:00 | 1990-05-01 00:00:00 | 1990-04-01 00:00:00 | 1990-05-01 00:00:00 | 1990-04-01 00:00:00 | 1990-05-01 00:00:00 + 1990-06-03 00:00:00 | 1990-06-01 00:00:00 | 1990-05-01 00:00:00 | 1990-04-01 00:00:00 | 1990-06-01 00:00:00 | 1990-06-01 00:00:00 | 1990-05-01 00:00:00 +(6 rows) + +SELECT + time, + time_bucket('1 month', time) AS "1m", + time_bucket('2 month', time) AS "2m", + time_bucket('3 month', time) AS "3m", + time_bucket('1 month', time, '2000-02-01'::timestamptz) AS "1m origin", + time_bucket('2 month', time, '2000-02-01'::timestamptz) AS "2m origin", + time_bucket('3 month', time, '2000-02-01'::timestamptz) AS "3m origin" +FROM generate_series('1990-01-03'::timestamptz,'1990-06-03'::timestamptz,'1month'::interval) time; + time | 1m | 2m | 3m | 1m origin | 2m origin | 3m origin 
+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------ + 1990-01-03 00:00:00-05 | 1989-12-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1989-11-30 19:00:00-05 | 1989-10-31 19:00:00-05 + 1990-02-03 00:00:00-05 | 1990-01-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1989-12-31 19:00:00-05 | 1990-01-31 19:00:00-05 | 1990-01-31 19:00:00-05 | 1990-01-31 19:00:00-05 + 1990-03-03 00:00:00-05 | 1990-02-28 19:00:00-05 | 1990-02-28 19:00:00-05 | 1989-12-31 19:00:00-05 | 1990-02-28 19:00:00-05 | 1990-01-31 19:00:00-05 | 1990-01-31 19:00:00-05 + 1990-04-03 00:00:00-04 | 1990-03-31 19:00:00-05 | 1990-02-28 19:00:00-05 | 1990-03-31 19:00:00-05 | 1990-03-31 19:00:00-05 | 1990-03-31 19:00:00-05 | 1990-01-31 19:00:00-05 + 1990-05-03 00:00:00-04 | 1990-04-30 20:00:00-04 | 1990-04-30 20:00:00-04 | 1990-03-31 19:00:00-05 | 1990-04-30 20:00:00-04 | 1990-03-31 19:00:00-05 | 1990-04-30 20:00:00-04 + 1990-06-03 00:00:00-04 | 1990-05-31 20:00:00-04 | 1990-04-30 20:00:00-04 | 1990-03-31 19:00:00-05 | 1990-05-31 20:00:00-04 | 1990-05-31 20:00:00-04 | 1990-04-30 20:00:00-04 +(6 rows) + +--------------------------------------- +--- Test time_bucket with timezones --- +--------------------------------------- +-- test NULL args +SELECT +time_bucket(NULL::interval,now(),'Europe/Berlin'), +time_bucket('1day',NULL::timestamptz,'Europe/Berlin'), +time_bucket('1day',now(),NULL::text), +time_bucket('1day','2020-02-03','Europe/Berlin',NULL), +time_bucket('1day','2020-02-03','Europe/Berlin','2020-04-01',NULL), +time_bucket('1day','2020-02-03','Europe/Berlin',NULL,NULL), +time_bucket('1day','2020-02-03','Europe/Berlin',"offset":=NULL::interval), +time_bucket('1day','2020-02-03','Europe/Berlin',origin:=NULL::timestamptz); + time_bucket | time_bucket | time_bucket | time_bucket | time_bucket | time_bucket | time_bucket | time_bucket +-------------+-------------+-------------+------------------------+------------------------+------------------------+------------------------+------------------------ + | | | 2020-02-02 18:00:00-05 | 2020-02-03 00:00:00-05 | 2020-02-02 18:00:00-05 | 2020-02-02 18:00:00-05 | 2020-02-02 18:00:00-05 +(1 row) + +SET datestyle TO ISO; +SELECT + time_bucket('1day', ts) AS "UTC", + time_bucket('1day', ts, 'Europe/Berlin') AS "Berlin", + time_bucket('1day', ts, 'Europe/London') AS "London", + time_bucket('1day', ts, 'America/New_York') AS "New York", + time_bucket('1day', ts, 'PST') AS "PST", + time_bucket('1day', ts, current_setting('timezone')) AS "current" +FROM generate_series('1999-12-31 17:00'::timestamptz,'2000-01-02 3:00'::timestamptz, '1hour'::interval) ts; + UTC | Berlin | London | New York | PST | current +------------------------+------------------------+------------------------+------------------------+------------------------+------------------------ + 1999-12-30 19:00:00-05 | 1999-12-30 18:00:00-05 | 1999-12-30 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05 + 1999-12-30 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-30 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 
00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 1999-12-31 00:00:00-05 | 1999-12-31 03:00:00-05 | 1999-12-31 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-31 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-31 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-31 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 1999-12-31 19:00:00-05 | 2000-01-01 18:00:00-05 | 1999-12-31 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 
00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-01 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-02 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-02 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-02 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-02 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-02 00:00:00-05 | 2000-01-01 03:00:00-05 | 2000-01-02 00:00:00-05 + 2000-01-01 19:00:00-05 | 2000-01-01 18:00:00-05 | 2000-01-01 19:00:00-05 | 2000-01-02 00:00:00-05 | 2000-01-02 03:00:00-05 | 2000-01-02 00:00:00-05 +(35 rows) + +SELECT + time_bucket('1month', ts) AS "UTC", + time_bucket('1month', ts, 'Europe/Berlin') AS "Berlin", + time_bucket('1month', ts, 'America/New_York') AS "New York", + time_bucket('1month', ts, current_setting('timezone')) AS "current", + time_bucket('2month', ts, current_setting('timezone')) AS "2m", + time_bucket('2month', ts, current_setting('timezone'), '2000-02-01'::timestamp) AS "2m origin", + time_bucket('2month', ts, current_setting('timezone'), "offset":='14 day'::interval) AS "2m offset", + time_bucket('2month', ts, current_setting('timezone'), '2000-02-01'::timestamp, '7 day'::interval) AS "2m offset + origin" +FROM generate_series('1999-12-01'::timestamptz,'2000-09-01'::timestamptz, '9 day'::interval) ts; + UTC | Berlin | New York | current | 2m | 2m origin | 2m offset | 2m offset + origin +------------------------+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------+------------------------ + 1999-11-30 19:00:00-05 | 1999-11-30 18:00:00-05 | 1999-12-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-10-08 00:00:00-04 + 1999-11-30 19:00:00-05 | 1999-11-30 18:00:00-05 | 1999-12-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 1999-11-30 19:00:00-05 | 1999-11-30 18:00:00-05 | 1999-12-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 1999-11-30 19:00:00-05 | 1999-11-30 18:00:00-05 | 1999-12-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 1999-11-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 1999-12-31 19:00:00-05 | 1999-12-31 18:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 1999-12-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 2000-01-31 
19:00:00-05 | 2000-01-31 18:00:00-05 | 2000-02-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 1999-12-08 00:00:00-05 + 2000-01-31 19:00:00-05 | 2000-01-31 18:00:00-05 | 2000-02-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-01-31 19:00:00-05 | 2000-01-31 18:00:00-05 | 2000-02-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-01-31 19:00:00-05 | 2000-01-31 18:00:00-05 | 2000-02-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-02-29 19:00:00-05 | 2000-02-29 18:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-01-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-02-29 19:00:00-05 | 2000-02-29 18:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-02-29 19:00:00-05 | 2000-02-29 18:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-02-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-03-31 19:00:00-05 | 2000-03-31 17:00:00-05 | 2000-04-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-02-08 00:00:00-05 + 2000-03-31 19:00:00-05 | 2000-03-31 17:00:00-05 | 2000-04-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-04-08 00:00:00-04 + 2000-03-31 19:00:00-05 | 2000-03-31 17:00:00-05 | 2000-04-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-01 00:00:00-05 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-04-08 00:00:00-04 + 2000-04-30 20:00:00-04 | 2000-04-30 18:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-04-08 00:00:00-04 + 2000-04-30 20:00:00-04 | 2000-04-30 18:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-04-01 00:00:00-05 | 2000-03-15 00:00:00-05 | 2000-04-08 00:00:00-04 + 2000-04-30 20:00:00-04 | 2000-04-30 18:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-04-01 00:00:00-05 | 2000-05-15 00:00:00-04 | 2000-04-08 00:00:00-04 + 2000-04-30 20:00:00-04 | 2000-04-30 18:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-04-01 00:00:00-05 | 2000-05-15 00:00:00-04 | 2000-04-08 00:00:00-04 + 2000-05-31 20:00:00-04 | 2000-05-31 18:00:00-04 | 2000-06-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-04-08 00:00:00-04 + 2000-05-31 20:00:00-04 | 2000-05-31 18:00:00-04 | 2000-06-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-06-08 00:00:00-04 + 2000-05-31 20:00:00-04 | 2000-05-31 18:00:00-04 | 2000-06-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-06-08 00:00:00-04 + 2000-06-30 20:00:00-04 | 2000-06-30 18:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 
00:00:00-04 | 2000-06-08 00:00:00-04 + 2000-06-30 20:00:00-04 | 2000-06-30 18:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-05-15 00:00:00-04 | 2000-06-08 00:00:00-04 + 2000-06-30 20:00:00-04 | 2000-06-30 18:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-06-08 00:00:00-04 + 2000-06-30 20:00:00-04 | 2000-06-30 18:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-06-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-06-08 00:00:00-04 + 2000-07-31 20:00:00-04 | 2000-07-31 18:00:00-04 | 2000-08-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-08-08 00:00:00-04 + 2000-07-31 20:00:00-04 | 2000-07-31 18:00:00-04 | 2000-08-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-08-08 00:00:00-04 + 2000-07-31 20:00:00-04 | 2000-07-31 18:00:00-04 | 2000-08-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-01 00:00:00-04 | 2000-08-01 00:00:00-04 | 2000-07-15 00:00:00-04 | 2000-08-08 00:00:00-04 +(31 rows) + +RESET datestyle; +------------------------------------------------------------ +--- Test timescaledb_experimental.time_bucket_ng function -- +------------------------------------------------------------ +-- not supported functionality +\set ON_ERROR_STOP 0 +SELECT timescaledb_experimental.time_bucket_ng('1 hour', '2001-02-03' :: date) AS result; +ERROR: interval must be either days and weeks, or months and years +SELECT timescaledb_experimental.time_bucket_ng('0 days', '2001-02-03' :: date) AS result; +ERROR: interval must be at least one day +SELECT timescaledb_experimental.time_bucket_ng('1 month', '2001-02-03' :: date, origin => '2000-01-02') AS result; +ERROR: origin must be the first day of the month +HINT: When using timestamptz-version of the function, 'origin' is converted to provided 'timezone'. +SELECT timescaledb_experimental.time_bucket_ng('1 month', '2000-01-02' :: date, origin => '2001-01-01') AS result; + result +------------ + 01-01-2000 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 day', '2000-01-02' :: date, origin => '2001-01-01') AS result; +ERROR: origin must be before the given date +SELECT timescaledb_experimental.time_bucket_ng('1 month 3 hours', '2021-11-22' :: timestamp) AS result; +ERROR: interval can't combine months with minutes or hours +-- timestamp is less than the default 'origin' value +SELECT timescaledb_experimental.time_bucket_ng('1 day', '1999-01-01 12:34:56 MSK' :: timestamptz, timezone => 'MSK'); +ERROR: origin must be before the given date +-- 'origin' in Europe/Moscow timezone is not the first day of the month at given time zone (UTC in this case) +select timescaledb_experimental.time_bucket_ng('1 month', '2021-07-12 12:34:56 Europe/Moscow' :: timestamptz, origin => '2021-06-01 00:00:00 Europe/Moscow' :: timestamptz, timezone => 'UTC'); +ERROR: origin must be the first day of the month +HINT: When using timestamptz-version of the function, 'origin' is converted to provided 'timezone'. 
+\set ON_ERROR_STOP 1 +-- wrappers +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-11-22' :: timestamp) AS result; + result +-------------------------- + Fri Jan 01 00:00:00 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-11-22' :: timestamptz) AS result; + result +------------------------------ + Fri Jan 01 00:00:00 2021 EST +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-11-22' :: timestamp, origin => '2021-06-01') AS result; + result +-------------------------- + Tue Jun 01 00:00:00 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-11-22' :: timestamptz, origin => '2021-06-01') AS result; + result +------------------------------ + Tue Jun 01 00:00:00 2021 EDT +(1 row) + +-- null argument +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: date) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamp) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamptz) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamptz, timezone => 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: date, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamp, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamptz, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', null :: timestamptz, origin => '2021-06-01', timezone => 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +-- null interval +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12' :: date) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamp) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamptz) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamptz, 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12' :: date, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamp, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamptz, origin => '2021-06-01') AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng(null, '2021-07-12 12:34:56' :: timestamptz, origin => '2021-06-01', timezone => 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +-- null origin +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12' :: date, origin => null) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamp, origin => null) AS result; + result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, origin => null) AS result; + 
result +-------- + +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, origin => null, timezone => 'Europe/Moscow') AS result; + result +-------- + +(1 row) + +-- infinity argument +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: date) AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamp) AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamptz) AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamptz, timezone => 'Europe/Moscow') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: date, origin => '2021-06-01') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamp, origin => '2021-06-01') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamptz, origin => '2021-06-01') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', 'infinity' :: timestamptz, origin => '2021-06-01', timezone => 'Europe/Moscow') AS result; + result +---------- + infinity +(1 row) + +-- test for specific code path: hours/minutes/seconds interval and timestamp argument +SELECT timescaledb_experimental.time_bucket_ng('12 hours', 'infinity' :: timestamp) AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('12 hours', 'infinity' :: timestamp, origin => '2021-06-01') AS result; + result +---------- + infinity +(1 row) + +-- infinite origin +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12' :: date, origin => 'infinity') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamp, origin => 'infinity') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, origin => 'infinity') AS result; + result +---------- + infinity +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, origin => 'infinity', timezone => 'Europe/Moscow') AS result; + result +---------- + infinity +(1 row) + +-- test for specific code path: hours/minutes/seconds interval and timestamp argument +SELECT timescaledb_experimental.time_bucket_ng('12 hours', '2021-07-12 12:34:56' :: timestamp, origin => 'infinity') AS result; + result +---------- + infinity +(1 row) + +-- test for invalid timezone argument +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, timezone => null) AS result; + result +-------- + +(1 row) + +\set ON_ERROR_STOP 0 +SELECT timescaledb_experimental.time_bucket_ng('1 year', '2021-07-12 12:34:56' :: timestamptz, timezone => 'Europe/Ololondon') AS result; +ERROR: time zone "Europe/Ololondon" not recognized +\set ON_ERROR_STOP 1 +-- Make sure time_bucket_ng() supports seconds, minutes, and hours. +-- We happen to know that the internal implementation is the same +-- as for time_bucket(), thus there is no reason to execute all the tests +-- we already have for time_bucket(). 
These two functions will most likely +-- be merged eventually anyway. +SELECT timescaledb_experimental.time_bucket_ng('30 seconds', '2021-07-12 12:34:56' :: timestamp) AS result; + result +-------------------------- + Mon Jul 12 12:34:30 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('15 minutes', '2021-07-12 12:34:56' :: timestamp) AS result; + result +-------------------------- + Mon Jul 12 12:30:00 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('6 hours', '2021-07-12 12:34:56' :: timestamp) AS result; + result +-------------------------- + Mon Jul 12 12:00:00 2021 +(1 row) + +-- Same as above, but with provided 'origin' argument. +SELECT timescaledb_experimental.time_bucket_ng('30 seconds', '2021-07-12 12:34:56' :: timestamp, origin => '2021-07-12 12:10:00') AS result; + result +-------------------------- + Mon Jul 12 12:34:30 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('15 minutes', '2021-07-12 12:34:56' :: timestamp, origin => '2021-07-12 12:10:00') AS result; + result +-------------------------- + Mon Jul 12 12:25:00 2021 +(1 row) + +SELECT timescaledb_experimental.time_bucket_ng('6 hours', '2021-07-12 12:34:56' :: timestamp, origin => '2021-07-12 12:10:00') AS result; + result +-------------------------- + Mon Jul 12 12:10:00 2021 +(1 row) + +-- N days / weeks buckets +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 day', d), 'YYYY-MM-DD') AS d1, + to_char(timescaledb_experimental.time_bucket_ng('2 days', d), 'YYYY-MM-DD') AS d2, + to_char(timescaledb_experimental.time_bucket_ng('3 days', d), 'YYYY-MM-DD') AS d3, + to_char(timescaledb_experimental.time_bucket_ng('1 week', d), 'YYYY-MM-DD') AS w1, + to_char(timescaledb_experimental.time_bucket_ng('1 week 2 days', d), 'YYYY-MM-DD') AS w1d2 +FROM generate_series('2020-01-01' :: date, '2020-01-12', '1 day') AS ts, + unnest(array[ts :: date]) AS d; + d | d1 | d2 | d3 | w1 | w1d2 +------------+------------+------------+------------+------------+------------ + 2020-01-01 | 2020-01-01 | 2019-12-31 | 2020-01-01 | 2019-12-28 | 2019-12-26 + 2020-01-02 | 2020-01-02 | 2020-01-02 | 2020-01-01 | 2019-12-28 | 2019-12-26 + 2020-01-03 | 2020-01-03 | 2020-01-02 | 2020-01-01 | 2019-12-28 | 2019-12-26 + 2020-01-04 | 2020-01-04 | 2020-01-04 | 2020-01-04 | 2020-01-04 | 2020-01-04 + 2020-01-05 | 2020-01-05 | 2020-01-04 | 2020-01-04 | 2020-01-04 | 2020-01-04 + 2020-01-06 | 2020-01-06 | 2020-01-06 | 2020-01-04 | 2020-01-04 | 2020-01-04 + 2020-01-07 | 2020-01-07 | 2020-01-06 | 2020-01-07 | 2020-01-04 | 2020-01-04 + 2020-01-08 | 2020-01-08 | 2020-01-08 | 2020-01-07 | 2020-01-04 | 2020-01-04 + 2020-01-09 | 2020-01-09 | 2020-01-08 | 2020-01-07 | 2020-01-04 | 2020-01-04 + 2020-01-10 | 2020-01-10 | 2020-01-10 | 2020-01-10 | 2020-01-04 | 2020-01-04 + 2020-01-11 | 2020-01-11 | 2020-01-10 | 2020-01-10 | 2020-01-11 | 2020-01-04 + 2020-01-12 | 2020-01-12 | 2020-01-12 | 2020-01-10 | 2020-01-11 | 2020-01-04 +(12 rows) + +-- N days / weeks buckets with given 'origin' +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 day', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS d1, + to_char(timescaledb_experimental.time_bucket_ng('2 days', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS d2, + to_char(timescaledb_experimental.time_bucket_ng('3 days', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS d3, + to_char(timescaledb_experimental.time_bucket_ng('1 week', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS w1, + 
to_char(timescaledb_experimental.time_bucket_ng('1 week 2 days', d, origin => '2020-01-01'), 'YYYY-MM-DD') AS w1d2 +FROM generate_series('2020-01-01' :: date, '2020-01-12', '1 day') AS ts, + unnest(array[ts :: date]) AS d; + d | d1 | d2 | d3 | w1 | w1d2 +------------+------------+------------+------------+------------+------------ + 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-01-02 | 2020-01-02 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-01-03 | 2020-01-03 | 2020-01-03 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-01-04 | 2020-01-04 | 2020-01-03 | 2020-01-04 | 2020-01-01 | 2020-01-01 + 2020-01-05 | 2020-01-05 | 2020-01-05 | 2020-01-04 | 2020-01-01 | 2020-01-01 + 2020-01-06 | 2020-01-06 | 2020-01-05 | 2020-01-04 | 2020-01-01 | 2020-01-01 + 2020-01-07 | 2020-01-07 | 2020-01-07 | 2020-01-07 | 2020-01-01 | 2020-01-01 + 2020-01-08 | 2020-01-08 | 2020-01-07 | 2020-01-07 | 2020-01-08 | 2020-01-01 + 2020-01-09 | 2020-01-09 | 2020-01-09 | 2020-01-07 | 2020-01-08 | 2020-01-01 + 2020-01-10 | 2020-01-10 | 2020-01-09 | 2020-01-10 | 2020-01-08 | 2020-01-10 + 2020-01-11 | 2020-01-11 | 2020-01-11 | 2020-01-10 | 2020-01-08 | 2020-01-10 + 2020-01-12 | 2020-01-12 | 2020-01-11 | 2020-01-10 | 2020-01-08 | 2020-01-10 +(12 rows) + +-- N month buckets +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 month', d), 'YYYY-MM-DD') AS m1, + to_char(timescaledb_experimental.time_bucket_ng('2 month', d), 'YYYY-MM-DD') AS m2, + to_char(timescaledb_experimental.time_bucket_ng('3 month', d), 'YYYY-MM-DD') AS m3, + to_char(timescaledb_experimental.time_bucket_ng('4 month', d), 'YYYY-MM-DD') AS m4, + to_char(timescaledb_experimental.time_bucket_ng('5 month', d), 'YYYY-MM-DD') AS m5 +FROM generate_series('2020-01-01' :: date, '2020-12-01', '1 month') AS ts, + unnest(array[ts :: date]) AS d; + d | m1 | m2 | m3 | m4 | m5 +------------+------------+------------+------------+------------+------------ + 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-02-01 | 2020-02-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-03-01 | 2020-03-01 | 2020-03-01 | 2020-01-01 | 2020-01-01 | 2020-01-01 + 2020-04-01 | 2020-04-01 | 2020-03-01 | 2020-04-01 | 2020-01-01 | 2020-01-01 + 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-04-01 | 2020-05-01 | 2020-01-01 + 2020-06-01 | 2020-06-01 | 2020-05-01 | 2020-04-01 | 2020-05-01 | 2020-06-01 + 2020-07-01 | 2020-07-01 | 2020-07-01 | 2020-07-01 | 2020-05-01 | 2020-06-01 + 2020-08-01 | 2020-08-01 | 2020-07-01 | 2020-07-01 | 2020-05-01 | 2020-06-01 + 2020-09-01 | 2020-09-01 | 2020-09-01 | 2020-07-01 | 2020-09-01 | 2020-06-01 + 2020-10-01 | 2020-10-01 | 2020-09-01 | 2020-10-01 | 2020-09-01 | 2020-06-01 + 2020-11-01 | 2020-11-01 | 2020-11-01 | 2020-10-01 | 2020-09-01 | 2020-11-01 + 2020-12-01 | 2020-12-01 | 2020-11-01 | 2020-10-01 | 2020-09-01 | 2020-11-01 +(12 rows) + +-- N month buckets with given 'origin' +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 month', d, origin => '2019-05-01'), 'YYYY-MM-DD') AS m1, + to_char(timescaledb_experimental.time_bucket_ng('2 month', d, origin => '2019-05-01'), 'YYYY-MM-DD') AS m2, + to_char(timescaledb_experimental.time_bucket_ng('3 month', d, origin => '2019-05-01'), 'YYYY-MM-DD') AS m3, + to_char(timescaledb_experimental.time_bucket_ng('4 month', d, origin => '2019-05-01'), 'YYYY-MM-DD') AS m4, + to_char(timescaledb_experimental.time_bucket_ng('5 month', d, origin => '2019-05-01'), 
'YYYY-MM-DD') AS m5 +FROM generate_series('2020-01-01' :: date, '2020-12-01', '1 month') AS ts, + unnest(array[ts :: date]) AS d; + d | m1 | m2 | m3 | m4 | m5 +------------+------------+------------+------------+------------+------------ + 2020-01-01 | 2020-01-01 | 2020-01-01 | 2019-11-01 | 2020-01-01 | 2019-10-01 + 2020-02-01 | 2020-02-01 | 2020-01-01 | 2020-02-01 | 2020-01-01 | 2019-10-01 + 2020-03-01 | 2020-03-01 | 2020-03-01 | 2020-02-01 | 2020-01-01 | 2020-03-01 + 2020-04-01 | 2020-04-01 | 2020-03-01 | 2020-02-01 | 2020-01-01 | 2020-03-01 + 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-03-01 + 2020-06-01 | 2020-06-01 | 2020-05-01 | 2020-05-01 | 2020-05-01 | 2020-03-01 + 2020-07-01 | 2020-07-01 | 2020-07-01 | 2020-05-01 | 2020-05-01 | 2020-03-01 + 2020-08-01 | 2020-08-01 | 2020-07-01 | 2020-08-01 | 2020-05-01 | 2020-08-01 + 2020-09-01 | 2020-09-01 | 2020-09-01 | 2020-08-01 | 2020-09-01 | 2020-08-01 + 2020-10-01 | 2020-10-01 | 2020-09-01 | 2020-08-01 | 2020-09-01 | 2020-08-01 + 2020-11-01 | 2020-11-01 | 2020-11-01 | 2020-11-01 | 2020-09-01 | 2020-08-01 + 2020-12-01 | 2020-12-01 | 2020-11-01 | 2020-11-01 | 2020-09-01 | 2020-08-01 +(12 rows) + +-- N years / N years, M month buckets +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 year', d), 'YYYY-MM-DD') AS y1, + to_char(timescaledb_experimental.time_bucket_ng('1 year 6 month', d), 'YYYY-MM-DD') AS y1m6, + to_char(timescaledb_experimental.time_bucket_ng('2 years', d), 'YYYY-MM-DD') AS y2, + to_char(timescaledb_experimental.time_bucket_ng('2 years 6 month', d), 'YYYY-MM-DD') AS y2m6, + to_char(timescaledb_experimental.time_bucket_ng('3 years', d), 'YYYY-MM-DD') AS y3 +FROM generate_series('2015-01-01' :: date, '2020-12-01', '6 month') AS ts, + unnest(array[ts :: date]) AS d; + d | y1 | y1m6 | y2 | y2m6 | y3 +------------+------------+------------+------------+------------+------------ + 2015-01-01 | 2015-01-01 | 2015-01-01 | 2014-01-01 | 2015-01-01 | 2015-01-01 + 2015-07-01 | 2015-01-01 | 2015-01-01 | 2014-01-01 | 2015-01-01 | 2015-01-01 + 2016-01-01 | 2016-01-01 | 2015-01-01 | 2016-01-01 | 2015-01-01 | 2015-01-01 + 2016-07-01 | 2016-01-01 | 2016-07-01 | 2016-01-01 | 2015-01-01 | 2015-01-01 + 2017-01-01 | 2017-01-01 | 2016-07-01 | 2016-01-01 | 2015-01-01 | 2015-01-01 + 2017-07-01 | 2017-01-01 | 2016-07-01 | 2016-01-01 | 2017-07-01 | 2015-01-01 + 2018-01-01 | 2018-01-01 | 2018-01-01 | 2018-01-01 | 2017-07-01 | 2018-01-01 + 2018-07-01 | 2018-01-01 | 2018-01-01 | 2018-01-01 | 2017-07-01 | 2018-01-01 + 2019-01-01 | 2019-01-01 | 2018-01-01 | 2018-01-01 | 2017-07-01 | 2018-01-01 + 2019-07-01 | 2019-01-01 | 2019-07-01 | 2018-01-01 | 2017-07-01 | 2018-01-01 + 2020-01-01 | 2020-01-01 | 2019-07-01 | 2020-01-01 | 2020-01-01 | 2018-01-01 + 2020-07-01 | 2020-01-01 | 2019-07-01 | 2020-01-01 | 2020-01-01 | 2018-01-01 +(12 rows) + +-- N years / N years, M month buckets with given 'origin' +SELECT to_char(d, 'YYYY-MM-DD') AS d, + to_char(timescaledb_experimental.time_bucket_ng('1 year', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y1, + to_char(timescaledb_experimental.time_bucket_ng('1 year 6 month', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y1m6, + to_char(timescaledb_experimental.time_bucket_ng('2 years', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y2, + to_char(timescaledb_experimental.time_bucket_ng('2 years 6 month', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y2m6, + to_char(timescaledb_experimental.time_bucket_ng('3 years', d, origin => '2000-06-01'), 'YYYY-MM-DD') AS y3 +FROM 
generate_series('2015-01-01' :: date, '2020-12-01', '6 month') AS ts, + unnest(array[ts :: date]) AS d; + d | y1 | y1m6 | y2 | y2m6 | y3 +------------+------------+------------+------------+------------+------------ + 2015-01-01 | 2014-06-01 | 2013-12-01 | 2014-06-01 | 2012-12-01 | 2012-06-01 + 2015-07-01 | 2015-06-01 | 2015-06-01 | 2014-06-01 | 2015-06-01 | 2015-06-01 + 2016-01-01 | 2015-06-01 | 2015-06-01 | 2014-06-01 | 2015-06-01 | 2015-06-01 + 2016-07-01 | 2016-06-01 | 2015-06-01 | 2016-06-01 | 2015-06-01 | 2015-06-01 + 2017-01-01 | 2016-06-01 | 2016-12-01 | 2016-06-01 | 2015-06-01 | 2015-06-01 + 2017-07-01 | 2017-06-01 | 2016-12-01 | 2016-06-01 | 2015-06-01 | 2015-06-01 + 2018-01-01 | 2017-06-01 | 2016-12-01 | 2016-06-01 | 2017-12-01 | 2015-06-01 + 2018-07-01 | 2018-06-01 | 2018-06-01 | 2018-06-01 | 2017-12-01 | 2018-06-01 + 2019-01-01 | 2018-06-01 | 2018-06-01 | 2018-06-01 | 2017-12-01 | 2018-06-01 + 2019-07-01 | 2019-06-01 | 2018-06-01 | 2018-06-01 | 2017-12-01 | 2018-06-01 + 2020-01-01 | 2019-06-01 | 2019-12-01 | 2018-06-01 | 2017-12-01 | 2018-06-01 + 2020-07-01 | 2020-06-01 | 2019-12-01 | 2020-06-01 | 2020-06-01 | 2018-06-01 +(12 rows) + +-- Test timezones support with different bucket sizes +BEGIN; +-- Timestamptz type is displayed in the session timezone. +-- To get consistent results during the test we temporary set the session +-- timezone to the known one. +SET TIME ZONE '+00'; +-- Moscow is UTC+3 in the year 2021. Let's say you are dealing with '1 day' bucket. +-- In order to calculate the beginning of the bucket you have to take LOCAL +-- Moscow time and throw away the time. You will get the midnight. The new day +-- starts 3 hours EARLIER in Moscow than in UTC+0 time zone, thus resulting +-- timestamp will be 3 hours LESS than for UTC+0. 
+SELECT bs, tz, to_char(ts_out, 'YYYY-MM-DD HH24:MI:SS TZ') as res +FROM unnest(array['Europe/Moscow', 'UTC']) as tz, + unnest(array['12 hours', '1 day', '1 month', '4 months', '1 year']) as bs, + unnest(array['2021-07-12 12:34:56 Europe/Moscow' :: timestamptz]) as ts_in, + unnest(array[timescaledb_experimental.time_bucket_ng(bs :: interval, ts_in, timezone => tz)]) as ts_out +ORDER BY tz, bs :: interval; + bs | tz | res +----------+---------------+------------------------- + 12 hours | Europe/Moscow | 2021-07-12 09:00:00 +00 + 1 day | Europe/Moscow | 2021-07-11 21:00:00 +00 + 1 month | Europe/Moscow | 2021-06-30 21:00:00 +00 + 4 months | Europe/Moscow | 2021-04-30 21:00:00 +00 + 1 year | Europe/Moscow | 2020-12-31 21:00:00 +00 + 12 hours | UTC | 2021-07-12 00:00:00 +00 + 1 day | UTC | 2021-07-12 00:00:00 +00 + 1 month | UTC | 2021-07-01 00:00:00 +00 + 4 months | UTC | 2021-05-01 00:00:00 +00 + 1 year | UTC | 2021-01-01 00:00:00 +00 +(10 rows) + +-- Same as above, but with 'origin' +SELECT bs, tz, to_char(ts_out, 'YYYY-MM-DD HH24:MI:SS TZ') as res +FROM unnest(array['Europe/Moscow']) as tz, + unnest(array['12 hours', '1 day', '1 month', '4 months', '1 year']) as bs, + unnest(array['2021-07-12 12:34:56 Europe/Moscow' :: timestamptz]) as ts_in, + unnest(array['2021-06-01 00:00:00 Europe/Moscow' :: timestamptz]) as origin_in, + unnest(array[timescaledb_experimental.time_bucket_ng(bs :: interval, ts_in, origin => origin_in, timezone => tz)]) as ts_out +ORDER BY tz, bs :: interval; + bs | tz | res +----------+---------------+------------------------- + 12 hours | Europe/Moscow | 2021-07-12 09:00:00 +00 + 1 day | Europe/Moscow | 2021-07-11 21:00:00 +00 + 1 month | Europe/Moscow | 2021-06-30 21:00:00 +00 + 4 months | Europe/Moscow | 2021-05-31 21:00:00 +00 + 1 year | Europe/Moscow | 2021-05-31 21:00:00 +00 +(5 rows) + +-- Overwritten origin allows to work with dates earlier than the default origin +SELECT to_char(timescaledb_experimental.time_bucket_ng('1 day', '1999-01-01 12:34:56 MSK' :: timestamptz, origin => '1900-01-01 00:00:00 MSK', timezone => 'MSK'), 'YYYY-MM-DD HH24:MI:SS TZ'); + to_char +------------------------- + 1998-12-31 21:00:00 +00 +(1 row) + +-- Restore previously used time zone. +ROLLBACK; +------------------------------------- +--- Test time input functions -- +------------------------------------- +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE OR REPLACE FUNCTION test.interval_to_internal(coltype REGTYPE, value ANYELEMENT = NULL::BIGINT) RETURNS BIGINT +AS :MODULE_PATHNAME, 'ts_dimension_interval_to_internal_test' LANGUAGE C VOLATILE; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT test.interval_to_internal('TIMESTAMP'::regtype, INTERVAL '1 day'); + interval_to_internal +---------------------- + 86400000000 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMP'::regtype, 86400000000); + interval_to_internal +---------------------- + 86400000000 +(1 row) + +---should give warning +SELECT test.interval_to_internal('TIMESTAMP'::regtype, 86400); +WARNING: unexpected interval: smaller than one second +HINT: The interval is specified in microseconds. 
+ interval_to_internal +---------------------- + 86400 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMP'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +SELECT test.interval_to_internal('BIGINT'::regtype, 2147483649::bigint); + interval_to_internal +---------------------- + 2147483649 +(1 row) + +-- Default interval for integer is supported as part of +-- hypertable generalization +SELECT test.interval_to_internal('INT'::regtype); + interval_to_internal +---------------------- + 100000 +(1 row) + +SELECT test.interval_to_internal('SMALLINT'::regtype); + interval_to_internal +---------------------- + 10000 +(1 row) + +SELECT test.interval_to_internal('BIGINT'::regtype); + interval_to_internal +---------------------- + 1000000 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMPTZ'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +SELECT test.interval_to_internal('TIMESTAMP'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +SELECT test.interval_to_internal('DATE'::regtype); + interval_to_internal +---------------------- + 604800000000 +(1 row) + +\set VERBOSITY terse +\set ON_ERROR_STOP 0 +SELECT test.interval_to_internal('INT'::regtype, 2147483649::bigint); +ERROR: invalid interval: must be between 1 and 2147483647 +SELECT test.interval_to_internal('SMALLINT'::regtype, 32768::bigint); +ERROR: invalid interval: must be between 1 and 32767 +SELECT test.interval_to_internal('TEXT'::regtype, 32768::bigint); +ERROR: invalid type for dimension "testcol" +SELECT test.interval_to_internal('INT'::regtype, INTERVAL '1 day'); +ERROR: invalid interval type for integer dimension +\set ON_ERROR_STOP 1 diff --git a/test/expected/ts_merge.out b/test/expected/ts_merge-13.out similarity index 100% rename from test/expected/ts_merge.out rename to test/expected/ts_merge-13.out diff --git a/test/expected/ts_merge-14.out b/test/expected/ts_merge-14.out new file mode 100644 index 00000000000..5e0aa61ec92 --- /dev/null +++ b/test/expected/ts_merge-14.out @@ -0,0 +1,2551 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +\set ON_ERROR_STOP 0 +\set VERBOSITY default +SET client_min_messages TO error; +\set TEST_BASE_NAME ts_merge +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') AS "TEST_LOAD_NAME", + format('include/%s_load_ht.sql', :'TEST_BASE_NAME') AS "TEST_LOAD_HT_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') AS "TEST_QUERY_NAME", + format('%s/results/%s_results.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_WITH_HYPERTABLE", + format('%s/results/%s_ht_results.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_WITH_NO_HYPERTABLE" \gset +SELECT format('\! diff -u --label "Base pg table results" --label "Hyperatable results" %s %s', :'TEST_RESULTS_WITH_HYPERTABLE', :'TEST_RESULTS_WITH_NO_HYPERTABLE') AS "DIFF_CMD" \gset +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+CREATE USER regress_merge_privs; +CREATE USER regress_merge_no_privs; +DROP TABLE IF EXISTS target; +DROP TABLE IF EXISTS source; +CREATE TABLE target (tid integer, balance integer) + WITH (autovacuum_enabled=off); +CREATE TABLE source (sid integer, delta integer) -- no index + WITH (autovacuum_enabled=off); +INSERT INTO target VALUES (1, 10); +INSERT INTO target VALUES (2, 20); +INSERT INTO target VALUES (3, 30); +SELECT t.ctid is not null as matched, t.*, s.* FROM source s FULL OUTER JOIN target t ON s.sid = t.tid ORDER BY t.tid, s.sid; + matched | tid | balance | sid | delta +---------+-----+---------+-----+------- + t | 1 | 10 | | + t | 2 | 20 | | + t | 3 | 30 | | +(3 rows) + +ALTER TABLE target OWNER TO regress_merge_privs; +ALTER TABLE source OWNER TO regress_merge_privs; +CREATE TABLE target2 (tid integer, balance integer) + WITH (autovacuum_enabled=off); +CREATE TABLE source2 (sid integer, delta integer) + WITH (autovacuum_enabled=off); +ALTER TABLE target2 OWNER TO regress_merge_no_privs; +ALTER TABLE source2 OWNER TO regress_merge_no_privs; +GRANT INSERT ON target TO regress_merge_no_privs; +GRANT CREATE ON SCHEMA public TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +CREATE TABLE sq_target (tid integer NOT NULL, balance integer) + WITH (autovacuum_enabled=off); +CREATE TABLE sq_source (delta integer, sid integer, balance integer DEFAULT 0) + WITH (autovacuum_enabled=off); +INSERT INTO sq_target(tid, balance) VALUES (1,100), (2,200), (3,300); +INSERT INTO sq_source(sid, delta) VALUES (1,10), (2,20), (4,40); +-- conditional WHEN clause +CREATE TABLE wq_target (tid integer not null, balance integer DEFAULT -1) + WITH (autovacuum_enabled=off); +CREATE TABLE wq_source (balance integer, sid integer) + WITH (autovacuum_enabled=off); +INSERT INTO wq_source (sid, balance) VALUES (1, 100); +CREATE TABLE cj_target (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE cj_source1 (sid1 integer, scat integer, delta integer) + WITH (autovacuum_enabled=off); +CREATE TABLE cj_source2 (sid2 integer, sval text) + WITH (autovacuum_enabled=off); +INSERT INTO cj_source1 VALUES (1, 10, 100); +INSERT INTO cj_source1 VALUES (1, 20, 200); +INSERT INTO cj_source1 VALUES (2, 20, 300); +INSERT INTO cj_source1 VALUES (3, 10, 400); +INSERT INTO cj_source2 VALUES (1, 'initial source2'); +INSERT INTO cj_source2 VALUES (2, 'initial source2'); +INSERT INTO cj_source2 VALUES (3, 'initial source2'); +CREATE TABLE fs_target (a int, b int, c text) + WITH (autovacuum_enabled=off); +-- run tests on normal table +\o :TEST_RESULTS_WITH_NO_HYPERTABLE +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+-- +-- Errors +-- +MERGE INTO target t RANDOMWORD +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:12: ERROR: syntax error at or near "RANDOMWORD" +LINE 1: MERGE INTO target t RANDOMWORD + ^ +-- MATCHED/INSERT error +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:18: ERROR: syntax error at or near "INSERT" +LINE 5: INSERT DEFAULT VALUES; + ^ +-- incorrectly specifying INTO target +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT INTO target DEFAULT VALUES; +psql:include/ts_merge_query.sql:24: ERROR: syntax error at or near "INTO" +LINE 5: INSERT INTO target DEFAULT VALUES; + ^ +-- Multiple VALUES clause +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (1,1), (2,2); +psql:include/ts_merge_query.sql:30: ERROR: syntax error at or near "," +LINE 5: INSERT VALUES (1,1), (2,2); + ^ +-- SELECT query for INSERT +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT SELECT (1, 1); +psql:include/ts_merge_query.sql:36: ERROR: syntax error at or near "SELECT" +LINE 5: INSERT SELECT (1, 1); + ^ +-- NOT MATCHED/UPDATE +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:42: ERROR: syntax error at or near "UPDATE" +LINE 5: UPDATE SET balance = 0; + ^ +-- UPDATE tablename +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE target SET balance = 0; +psql:include/ts_merge_query.sql:48: ERROR: syntax error at or near "target" +LINE 5: UPDATE target SET balance = 0; + ^ +-- source and target names the same +MERGE INTO target +USING target +ON tid = tid +WHEN MATCHED THEN DO NOTHING; +psql:include/ts_merge_query.sql:53: ERROR: name "target" specified more than once +DETAIL: The name is used both as MERGE target table and data source. +-- used in a CTE +WITH foo AS ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) SELECT * FROM foo; +psql:include/ts_merge_query.sql:58: ERROR: MERGE not supported in WITH query +LINE 1: WITH foo AS ( + ^ +-- used in COPY +COPY ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) TO stdout; +psql:include/ts_merge_query.sql:63: ERROR: MERGE not supported in COPY +-- unsupported relation types +-- view +CREATE VIEW tv AS SELECT * FROM target; +MERGE INTO tv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:72: ERROR: cannot execute MERGE on relation "tv" +DETAIL: This operation is not supported for views. +DROP VIEW tv; +-- materialized view +CREATE MATERIALIZED VIEW mv AS SELECT * FROM target; +MERGE INTO mv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:81: ERROR: cannot execute MERGE on relation "mv" +DETAIL: This operation is not supported for materialized views. 
+DROP MATERIALIZED VIEW mv; +-- permissions +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:90: ERROR: permission denied for table source2 +GRANT INSERT ON target TO regress_merge_no_privs; +SET SESSION AUTHORIZATION regress_merge_no_privs; +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:99: ERROR: permission denied for table target +GRANT UPDATE ON target2 TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN MATCHED THEN + DELETE; +psql:include/ts_merge_query.sql:108: ERROR: permission denied for table target2 +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:114: ERROR: permission denied for table target2 +-- check if the target can be accessed from source relation subquery; we should +-- not be able to do so +MERGE INTO target t +USING (SELECT * FROM source WHERE t.tid > sid) s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:122: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 2: USING (SELECT * FROM source WHERE t.tid > sid) s + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- +-- initial tests +-- +-- zero rows in source has no effect +MERGE INTO target +USING source +ON target.tid = source.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +ROLLBACK; +-- insert some non-matching source rows to work from +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + DO NOTHING; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (5, 50); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- index plans +INSERT INTO target SELECT generate_series(1000,2500), 0; +ALTER TABLE target ADD PRIMARY KEY (tid); +ANALYZE target; +DELETE FROM target WHERE tid > 100; +ANALYZE target; +-- insert some matching source rows to work from +INSERT INTO source VALUES (2, 5); +INSERT INTO source VALUES (3, 20); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- equivalent of a DELETE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DO NOTHING; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED 
THEN + INSERT VALUES (4, NULL); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- duplicate source row causes multiple target row update ERROR +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:241: ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +psql:include/ts_merge_query.sql:249: ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +-- remove duplicate MATCHED data from source data +DELETE FROM source WHERE sid = 2; +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- duplicate source row on INSERT should fail because of target_pkey +INSERT INTO source VALUES (4, 40); +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, NULL); +psql:include/ts_merge_query.sql:265: ERROR: duplicate key value violates unique constraint "target_pkey" +DETAIL: Key (tid)=(4) already exists. +SELECT * FROM target ORDER BY tid; +psql:include/ts_merge_query.sql:266: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- remove duplicate NOT MATCHED data from source data +DELETE FROM source WHERE sid = 4; +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- remove constraints +alter table target drop CONSTRAINT target_pkey; +alter table target alter column tid drop not null; +-- multiple actions +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4) +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- should be equivalent +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0 +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- column references +-- do a simple equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- and again with duplicate source rows +INSERT INTO source VALUES (5, 50); +INSERT INTO source VALUES (5, 50); +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- removing duplicate source rows +DELETE FROM source WHERE sid = 5; +-- and again with explicitly identified column list +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- and again with a subtle error: referring to non-existent target row for NOT MATCHED +MERGE INTO target t +USING source AS s +ON 
t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +psql:include/ts_merge_query.sql:356: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- and again with a constant ON clause +BEGIN; +MERGE INTO target t +USING source AS s +ON (SELECT true) +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +psql:include/ts_merge_query.sql:364: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. +SELECT * FROM target ORDER BY tid; +psql:include/ts_merge_query.sql:365: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- now the classic UPSERT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- this time with a FALSE condition +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND FALSE THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +-- this time with an actual condition which returns false +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance <> 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +BEGIN; +-- and now with a condition which returns true +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +ROLLBACK; +-- conditions in the NOT MATCHED clause can only refer to source columns +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND t.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +psql:include/ts_merge_query.sql:408: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 3: WHEN NOT MATCHED AND t.balance = 100 THEN + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. 
+SELECT * FROM wq_target; +psql:include/ts_merge_query.sql:409: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +-- conditions in MATCHED clause can refer to both source and target +SELECT * FROM wq_source; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check if AND works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check if OR works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 199 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check source-side whole-row references +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON (t.tid = s.sid) +WHEN matched and t = s or t.tid = s.sid THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +ROLLBACK; +-- check if subqueries work in the conditions? 
+MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance > (SELECT max(balance) FROM target) THEN + UPDATE SET balance = t.balance + s.balance; +-- check if we can access system columns in the conditions +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.xmin = t.xmax THEN + UPDATE SET balance = t.balance + s.balance; +psql:include/ts_merge_query.sql:477: ERROR: cannot use system column "xmin" in MERGE WHEN condition +LINE 3: WHEN MATCHED AND t.xmin = t.xmax THEN + ^ +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.tableoid >= 0 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +DROP TABLE wq_target CASCADE; +DROP TABLE wq_source; +-- test triggers +create or replace function merge_trigfunc () returns trigger +language plpgsql as +$$ +DECLARE + line text; +BEGIN + SELECT INTO line format('%s %s %s trigger%s', + TG_WHEN, TG_OP, TG_LEVEL, CASE + WHEN TG_OP = 'INSERT' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', NEW) + WHEN TG_OP = 'UPDATE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s -> %s', OLD, NEW) + WHEN TG_OP = 'DELETE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', OLD) + END); + + RAISE NOTICE '%', line; + IF (TG_WHEN = 'BEFORE' AND TG_LEVEL = 'ROW') THEN + IF (TG_OP = 'DELETE') THEN + RETURN OLD; + ELSE + RETURN NEW; + END IF; + ELSE + RETURN NULL; + END IF; +END; +$$; +CREATE TRIGGER merge_bsi BEFORE INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsu BEFORE UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsd BEFORE DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asi AFTER INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asu AFTER UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asd AFTER DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bri BEFORE INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bru BEFORE UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_brd BEFORE DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ari AFTER INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_aru AFTER UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ard AFTER DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +-- now the classic UPSERT, with a DELETE +BEGIN; +UPDATE target SET balance = 0 WHERE tid = 3; +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta +WHEN MATCHED THEN + DELETE +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- Test behavior of triggers that turn UPDATE/DELETE into no-ops +create or replace function skip_merge_op() returns trigger +language plpgsql as +$$ +BEGIN + RETURN NULL; +END; +$$; +SELECT * FROM target full outer join source on (sid = tid); +create trigger merge_skip BEFORE INSERT OR UPDATE or DELETE + ON target FOR EACH ROW EXECUTE FUNCTION skip_merge_op(); +DO $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN 
MATCHED AND s.sid = 3 THEN UPDATE SET balance = t.balance + s.delta +WHEN MATCHED THEN DELETE +WHEN NOT MATCHED THEN INSERT VALUES (sid, delta); +IF FOUND THEN + RAISE NOTICE 'Found'; +ELSE + RAISE NOTICE 'Not found'; +END IF; +GET DIAGNOSTICS result := ROW_COUNT; +RAISE NOTICE 'ROW_COUNT = %', result; +END; +$$; +SELECT * FROM target FULL OUTER JOIN source ON (sid = tid); +DROP TRIGGER merge_skip ON target; +DROP FUNCTION skip_merge_op(); +-- test from PL/pgSQL +-- make sure MERGE INTO isn't interpreted to mean returning variables like SELECT INTO +BEGIN; +DO LANGUAGE plpgsql $$ +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta; +END; +$$; +ROLLBACK; +--source constants +BEGIN; +MERGE INTO target t +USING (SELECT 9 AS sid, 57 AS delta) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +--source query +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta as newname FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.newname); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +--self-merge +BEGIN; +MERGE INTO target t1 +USING target t2 +ON t1.tid = t2.tid +WHEN MATCHED THEN + UPDATE SET balance = t1.balance + t2.balance +WHEN NOT MATCHED THEN + INSERT VALUES (t2.tid, t2.balance); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT tid as sid, balance as delta FROM target WHERE balance > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING +(SELECT sid, max(delta) AS delta + FROM source + GROUP BY sid + HAVING count(*) = 1 + ORDER BY sid ASC) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- plpgsql parameters and results +BEGIN; +CREATE FUNCTION merge_func (p_id integer, p_bal integer) +RETURNS INTEGER +LANGUAGE plpgsql +AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING (SELECT p_id AS sid) AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance - p_bal; +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(3, 4); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- PREPARE +BEGIN; +prepare foom as merge into target t using (select 1 as sid) s on (t.tid = s.sid) when matched then update set balance = 1; +execute foom; +ROLLBACK; +BEGIN; +PREPARE foom2 (integer, integer) AS +MERGE INTO target t +USING (SELECT 1) s +ON t.tid = $1 +WHEN MATCHED THEN +UPDATE SET balance = $2; +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +execute foom2 (1, 1); +ROLLBACK; +-- subqueries in source relation +BEGIN; +MERGE INTO sq_target t +USING (SELECT * FROM sq_source) s +ON tid = sid +WHEN MATCHED AND t.balance > delta THEN + UPDATE SET balance = t.balance + delta; +SELECT * FROM sq_target ORDER BY tid; +ROLLBACK; +-- try a view +CREATE VIEW v AS SELECT * FROM sq_source WHERE sid < 2; +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = 
v.balance + delta; +SELECT * FROM sq_target ORDER BY tid; +ROLLBACK; +-- ambiguous reference to a column +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +psql:include/ts_merge_query.sql:732: ERROR: column reference "balance" is ambiguous +LINE 5: UPDATE SET balance = balance + delta + ^ +ROLLBACK; +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +SELECT * FROM sq_target; +ROLLBACK; +-- CTEs +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +WITH targq AS ( + SELECT * FROM v +) +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +ROLLBACK; +-- RETURNING +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE +RETURNING *; +psql:include/ts_merge_query.sql:778: ERROR: syntax error at or near "RETURNING" +LINE 10: RETURNING *; + ^ +ROLLBACK; +-- EXPLAIN +CREATE TABLE ex_mtarget (a int, b int) + WITH (autovacuum_enabled=off); +CREATE TABLE ex_msource (a int, b int) + WITH (autovacuum_enabled=off); +INSERT INTO ex_mtarget SELECT i, i*10 FROM generate_series(1,100,2) i; +INSERT INTO ex_msource SELECT i, i*10 FROM generate_series(1,100,1) i; +CREATE FUNCTION explain_merge(query text) RETURNS SETOF text +LANGUAGE plpgsql AS +$$ +DECLARE ln text; +BEGIN + FOR ln IN + EXECUTE 'explain (analyze, timing off, summary off, costs off) ' || + query + LOOP + ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g'); + RETURN NEXT ln; + END LOOP; +END; +$$; +-- only updates +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = t.b + 1'); +-- only updates to selected tuples +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1'); +-- updates + deletes +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 10 AND t.a <= 20 THEN + DELETE'); +-- only inserts +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN NOT MATCHED AND s.a < 10 THEN + INSERT VALUES (a, b)'); +-- all three +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 30 AND t.a <= 40 THEN + DELETE +WHEN NOT MATCHED AND s.a < 20 THEN + INSERT VALUES (a, b)'); +-- nothing +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a AND t.a < -1000 +WHEN MATCHED AND t.a < 10 THEN + DO NOTHING'); +DROP TABLE ex_msource, ex_mtarget; +DROP FUNCTION explain_merge(text); +-- Subqueries +BEGIN; +MERGE INTO sq_target t 
+USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = (SELECT count(*) FROM sq_target); +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND (SELECT count(*) > 0 FROM sq_target) THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid AND (SELECT count(*) > 0 FROM sq_target) +WHEN MATCHED THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +DROP TABLE sq_target CASCADE; +DROP TABLE sq_source CASCADE; +CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 PARTITION OF pa_target FOR VALUES IN (1,4) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 PARTITION OF pa_target FOR VALUES IN (2,5,6) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 PARTITION OF pa_target FOR VALUES IN (3,8,9) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 PARTITION OF pa_target DEFAULT + WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid = 1 + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- try updating the partition key column +BEGIN; +CREATE FUNCTION merge_func() RETURNS integer LANGUAGE plpgsql AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_target CASCADE; +-- The target table is partitioned in the same way, but this time by attaching +-- partitions which have columns in different order, dropped columns etc. 
+CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 (balance float, tid integer, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 (extraid text, tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +ALTER TABLE part4 DROP COLUMN extraid; +ALTER TABLE pa_target ATTACH PARTITION part1 FOR VALUES IN (1,4); +ALTER TABLE pa_target ATTACH PARTITION part2 FOR VALUES IN (2,5,6); +ALTER TABLE pa_target ATTACH PARTITION part3 FOR VALUES IN (3,8,9); +ALTER TABLE pa_target ATTACH PARTITION part4 DEFAULT; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid IN (1, 5) + WHEN MATCHED AND tid % 5 = 0 THEN DELETE + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- try updating the partition key column +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- Sub-partitioning +CREATE TABLE pa_target (logts timestamp, tid integer, balance float, val text) + PARTITION BY RANGE (logts); +CREATE TABLE part_m01 PARTITION OF pa_target + FOR VALUES FROM ('2017-01-01') TO ('2017-02-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m01_odd PARTITION OF part_m01 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m01_even PARTITION OF part_m01 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02 PARTITION OF pa_target + FOR VALUES FROM ('2017-02-01') TO ('2017-03-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m02_odd PARTITION OF part_m02 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02_even PARTITION OF part_m02 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float) + WITH (autovacuum_enabled=off); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT '2017-01-31', id, id * 100, 'initial' FROM generate_series(1,9,3) AS id; +INSERT INTO pa_target SELECT '2017-02-28', id, id * 100, 'initial' FROM generate_series(2,9,3) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING (SELECT '2017-01-15' AS slogts, * FROM pa_source WHERE sid < 10) s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN 
+ INSERT VALUES (slogts::timestamp, sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- some complex joins on the source side +-- source relation is an unaliased join +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid1, delta, sval); +-- try accessing columns from either side of the source join +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta, sval) +WHEN MATCHED THEN + DELETE; +-- some simple expressions in INSERT targetlist +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta + scat, sval) +WHEN MATCHED THEN + UPDATE SET val = val || ' updated by merge'; +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN MATCHED THEN + UPDATE SET val = val || ' ' || delta::text; +SELECT * FROM cj_target ORDER BY tid; +ALTER TABLE cj_source1 RENAME COLUMN sid1 TO sid; +ALTER TABLE cj_source2 RENAME COLUMN sid2 TO sid; +TRUNCATE cj_target; +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON s1.sid = s2.sid +ON t.tid = s1.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s2.sid, delta, sval); +DROP TABLE cj_source2, cj_source1; +DROP TABLE cj_target CASCADE; +-- Function scans +MERGE INTO fs_target t +USING generate_series(1,100,1) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1); +MERGE INTO fs_target t +USING generate_series(1,100,2) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id, c = 'updated '|| id.*::text +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1, 'inserted ' || id.*::text); +SELECT count(*) FROM fs_target; +DROP TABLE fs_target CASCADE; +-- SERIALIZABLE test +-- handled in isolation tests +-- Inheritance-based partitioning +CREATE TABLE measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m02 ( + CHECK ( logdate >= DATE '2006-02-01' AND logdate < DATE '2006-03-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m03 ( + CHECK ( logdate >= DATE '2006-03-01' AND logdate < DATE '2006-04-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2007m01 ( + filler text, + peaktemp int, + logdate date not null, + city_id int not null, + unitsales int + CHECK ( logdate >= DATE '2007-01-01' AND logdate < DATE '2007-02-01') +) WITH (autovacuum_enabled=off); +ALTER TABLE measurement_y2007m01 DROP COLUMN filler; +ALTER TABLE measurement_y2007m01 INHERIT measurement; +INSERT INTO measurement VALUES (0, '2005-07-21', 5, 15); +CREATE OR REPLACE FUNCTION measurement_insert_trigger() +RETURNS TRIGGER AS $$ +BEGIN + IF ( NEW.logdate >= DATE '2006-02-01' AND + NEW.logdate < DATE '2006-03-01' ) THEN + INSERT INTO measurement_y2006m02 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2006-03-01' AND + NEW.logdate < DATE '2006-04-01' ) THEN + INSERT INTO measurement_y2006m03 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2007-01-01' AND + NEW.logdate < DATE '2007-02-01' ) THEN + INSERT INTO measurement_y2007m01 (city_id, logdate, peaktemp, unitsales) + VALUES (NEW.*); + ELSE + 
RAISE EXCEPTION 'Date out of range. Fix the measurement_insert_trigger() function!'; + END IF; + RETURN NULL; +END; +$$ LANGUAGE plpgsql ; +CREATE TRIGGER insert_measurement_trigger + BEFORE INSERT ON measurement + FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger(); +INSERT INTO measurement VALUES (1, '2006-02-10', 35, 10); +INSERT INTO measurement VALUES (1, '2006-02-16', 45, 20); +INSERT INTO measurement VALUES (1, '2006-03-17', 25, 10); +INSERT INTO measurement VALUES (1, '2006-03-27', 15, 40); +INSERT INTO measurement VALUES (1, '2007-01-15', 10, 10); +INSERT INTO measurement VALUES (1, '2007-01-17', 10, 10); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; +CREATE TABLE new_measurement (LIKE measurement) WITH (autovacuum_enabled=off); +INSERT INTO new_measurement VALUES (0, '2005-07-21', 25, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-01', 20, 10); +INSERT INTO new_measurement VALUES (1, '2006-02-16', 50, 10); +INSERT INTO new_measurement VALUES (2, '2006-02-10', 20, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-27', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-17', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-15', 5, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-16', 10, 10); +BEGIN; +MERGE INTO ONLY measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate, peaktemp; +ROLLBACK; +MERGE into measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; +BEGIN; +MERGE INTO new_measurement nm + USING ONLY measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; +ROLLBACK; +MERGE INTO new_measurement nm + USING measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; +DROP TABLE measurement, new_measurement CASCADE; +DROP FUNCTION measurement_insert_trigger(); +RESET SESSION AUTHORIZATION; +DROP TABLE target CASCADE; +DROP TABLE target2 CASCADE; +DROP TABLE source, source2; +DROP FUNCTION merge_trigfunc(); +REVOKE CREATE ON SCHEMA public FROM regress_merge_privs; +DROP USER regress_merge_privs; +DROP USER regress_merge_no_privs; +\o +\ir :TEST_LOAD_HT_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+CREATE USER regress_merge_privs; +CREATE USER regress_merge_no_privs; +DROP TABLE IF EXISTS target; +DROP TABLE IF EXISTS source; +CREATE TABLE target (tid integer, balance integer) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('target', 'tid', chunk_time_interval => 3); + create_hypertable +--------------------- + (1,public,target,t) +(1 row) + +CREATE TABLE source (sid integer, delta integer) -- no index + WITH (autovacuum_enabled=off); +INSERT INTO target VALUES (1, 10); +INSERT INTO target VALUES (2, 20); +INSERT INTO target VALUES (3, 30); +SELECT t.ctid is not null as matched, t.*, s.* FROM source s FULL OUTER JOIN target t ON s.sid = t.tid ORDER BY t.tid, s.sid; + matched | tid | balance | sid | delta +---------+-----+---------+-----+------- + t | 1 | 10 | | + t | 2 | 20 | | + t | 3 | 30 | | +(3 rows) + +ALTER TABLE target OWNER TO regress_merge_privs; +ALTER TABLE source OWNER TO regress_merge_privs; +CREATE TABLE target2 (tid integer, balance integer) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('target2', 'tid', chunk_time_interval => 3); + create_hypertable +---------------------- + (2,public,target2,t) +(1 row) + +CREATE TABLE source2 (sid integer, delta integer) + WITH (autovacuum_enabled=off); +ALTER TABLE target2 OWNER TO regress_merge_no_privs; +ALTER TABLE source2 OWNER TO regress_merge_no_privs; +GRANT INSERT ON target TO regress_merge_no_privs; +GRANT CREATE ON SCHEMA public TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +CREATE TABLE sq_target (tid integer NOT NULL, balance integer) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('sq_target', 'tid', chunk_time_interval => 3); + create_hypertable +------------------------ + (3,public,sq_target,t) +(1 row) + +CREATE TABLE sq_source (delta integer, sid integer, balance integer DEFAULT 0) + WITH (autovacuum_enabled=off); +INSERT INTO sq_target(tid, balance) VALUES (1,100), (2,200), (3,300); +INSERT INTO sq_source(sid, delta) VALUES (1,10), (2,20), (4,40); +-- conditional WHEN clause +CREATE TABLE wq_target (tid integer not null, balance integer DEFAULT -1) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('wq_target', 'tid', chunk_time_interval => 3); + create_hypertable +------------------------ + (4,public,wq_target,t) +(1 row) + +CREATE TABLE wq_source (balance integer, sid integer) + WITH (autovacuum_enabled=off); +INSERT INTO wq_source (sid, balance) VALUES (1, 100); +-- some complex joins on the source side +CREATE TABLE cj_target (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('cj_target', 'tid', chunk_time_interval => 3); + create_hypertable +------------------------ + (5,public,cj_target,t) +(1 row) + +CREATE TABLE cj_source1 (sid1 integer, scat integer, delta integer) + WITH (autovacuum_enabled=off); +CREATE TABLE cj_source2 (sid2 integer, sval text) + WITH (autovacuum_enabled=off); +INSERT INTO cj_source1 VALUES (1, 10, 100); +INSERT INTO cj_source1 VALUES (1, 20, 200); +INSERT INTO cj_source1 VALUES (2, 20, 300); +INSERT INTO cj_source1 VALUES (3, 10, 400); +INSERT INTO cj_source2 VALUES (1, 'initial source2'); +INSERT INTO cj_source2 VALUES (2, 'initial source2'); +INSERT INTO cj_source2 VALUES (3, 'initial source2'); +CREATE TABLE fs_target (a int, b int, c text) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('fs_target', 'a', chunk_time_interval => 3); + create_hypertable +------------------------ + (6,public,fs_target,t) +(1 row) + +-- run tests on hypertable +\o 
:TEST_RESULTS_WITH_HYPERTABLE +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- +-- Errors +-- +MERGE INTO target t RANDOMWORD +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:12: ERROR: syntax error at or near "RANDOMWORD" +LINE 1: MERGE INTO target t RANDOMWORD + ^ +-- MATCHED/INSERT error +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:18: ERROR: syntax error at or near "INSERT" +LINE 5: INSERT DEFAULT VALUES; + ^ +-- incorrectly specifying INTO target +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT INTO target DEFAULT VALUES; +psql:include/ts_merge_query.sql:24: ERROR: syntax error at or near "INTO" +LINE 5: INSERT INTO target DEFAULT VALUES; + ^ +-- Multiple VALUES clause +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (1,1), (2,2); +psql:include/ts_merge_query.sql:30: ERROR: syntax error at or near "," +LINE 5: INSERT VALUES (1,1), (2,2); + ^ +-- SELECT query for INSERT +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT SELECT (1, 1); +psql:include/ts_merge_query.sql:36: ERROR: syntax error at or near "SELECT" +LINE 5: INSERT SELECT (1, 1); + ^ +-- NOT MATCHED/UPDATE +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:42: ERROR: syntax error at or near "UPDATE" +LINE 5: UPDATE SET balance = 0; + ^ +-- UPDATE tablename +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE target SET balance = 0; +psql:include/ts_merge_query.sql:48: ERROR: syntax error at or near "target" +LINE 5: UPDATE target SET balance = 0; + ^ +-- source and target names the same +MERGE INTO target +USING target +ON tid = tid +WHEN MATCHED THEN DO NOTHING; +psql:include/ts_merge_query.sql:53: ERROR: name "target" specified more than once +DETAIL: The name is used both as MERGE target table and data source. +-- used in a CTE +WITH foo AS ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) SELECT * FROM foo; +psql:include/ts_merge_query.sql:58: ERROR: MERGE not supported in WITH query +LINE 1: WITH foo AS ( + ^ +-- used in COPY +COPY ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) TO stdout; +psql:include/ts_merge_query.sql:63: ERROR: MERGE not supported in COPY +-- unsupported relation types +-- view +CREATE VIEW tv AS SELECT * FROM target; +MERGE INTO tv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:72: ERROR: cannot execute MERGE on relation "tv" +DETAIL: This operation is not supported for views. +DROP VIEW tv; +-- materialized view +CREATE MATERIALIZED VIEW mv AS SELECT * FROM target; +MERGE INTO mv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:81: ERROR: cannot execute MERGE on relation "mv" +DETAIL: This operation is not supported for materialized views. 
+DROP MATERIALIZED VIEW mv; +-- permissions +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:90: ERROR: permission denied for table source2 +GRANT INSERT ON target TO regress_merge_no_privs; +SET SESSION AUTHORIZATION regress_merge_no_privs; +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:99: ERROR: permission denied for table target +GRANT UPDATE ON target2 TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN MATCHED THEN + DELETE; +psql:include/ts_merge_query.sql:108: ERROR: permission denied for table target2 +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:114: ERROR: permission denied for table target2 +-- check if the target can be accessed from source relation subquery; we should +-- not be able to do so +MERGE INTO target t +USING (SELECT * FROM source WHERE t.tid > sid) s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:122: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 2: USING (SELECT * FROM source WHERE t.tid > sid) s + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- +-- initial tests +-- +-- zero rows in source has no effect +MERGE INTO target +USING source +ON target.tid = source.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +ROLLBACK; +-- insert some non-matching source rows to work from +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + DO NOTHING; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (5, 50); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- index plans +INSERT INTO target SELECT generate_series(1000,2500), 0; +ALTER TABLE target ADD PRIMARY KEY (tid); +ANALYZE target; +DELETE FROM target WHERE tid > 100; +ANALYZE target; +-- insert some matching source rows to work from +INSERT INTO source VALUES (2, 5); +INSERT INTO source VALUES (3, 20); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- equivalent of a DELETE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DO NOTHING; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED 
THEN + INSERT VALUES (4, NULL); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- duplicate source row causes multiple target row update ERROR +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:241: ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +psql:include/ts_merge_query.sql:249: ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +-- remove duplicate MATCHED data from source data +DELETE FROM source WHERE sid = 2; +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- duplicate source row on INSERT should fail because of target_pkey +INSERT INTO source VALUES (4, 40); +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, NULL); +psql:include/ts_merge_query.sql:265: ERROR: duplicate key value violates unique constraint "2_2_target_pkey" +DETAIL: Key (tid)=(4) already exists. +SELECT * FROM target ORDER BY tid; +psql:include/ts_merge_query.sql:266: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- remove duplicate NOT MATCHED data from source data +DELETE FROM source WHERE sid = 4; +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- remove constraints +alter table target drop CONSTRAINT target_pkey; +alter table target alter column tid drop not null; +psql:include/ts_merge_query.sql:277: ERROR: cannot drop not-null constraint from a time-partitioned column +-- multiple actions +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4) +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- should be equivalent +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0 +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- column references +-- do a simple equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- and again with duplicate source rows +INSERT INTO source VALUES (5, 50); +INSERT INTO source VALUES (5, 50); +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- removing duplicate source rows +DELETE FROM source WHERE sid = 5; +-- and again with explicitly identified column list +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- and again with a 
subtle error: referring to non-existent target row for NOT MATCHED +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +psql:include/ts_merge_query.sql:356: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- and again with a constant ON clause +BEGIN; +MERGE INTO target t +USING source AS s +ON (SELECT true) +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +psql:include/ts_merge_query.sql:364: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. +SELECT * FROM target ORDER BY tid; +psql:include/ts_merge_query.sql:365: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- now the classic UPSERT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- this time with a FALSE condition +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND FALSE THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +-- this time with an actual condition which returns false +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance <> 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +BEGIN; +-- and now with a condition which returns true +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +ROLLBACK; +-- conditions in the NOT MATCHED clause can only refer to source columns +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND t.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +psql:include/ts_merge_query.sql:408: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 3: WHEN NOT MATCHED AND t.balance = 100 THEN + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. 
+SELECT * FROM wq_target; +psql:include/ts_merge_query.sql:409: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +-- conditions in MATCHED clause can refer to both source and target +SELECT * FROM wq_source; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check if AND works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check if OR works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 199 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check source-side whole-row references +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON (t.tid = s.sid) +WHEN matched and t = s or t.tid = s.sid THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +ROLLBACK; +-- check if subqueries work in the conditions? 
+MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance > (SELECT max(balance) FROM target) THEN + UPDATE SET balance = t.balance + s.balance; +-- check if we can access system columns in the conditions +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.xmin = t.xmax THEN + UPDATE SET balance = t.balance + s.balance; +psql:include/ts_merge_query.sql:477: ERROR: cannot use system column "xmin" in MERGE WHEN condition +LINE 3: WHEN MATCHED AND t.xmin = t.xmax THEN + ^ +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.tableoid >= 0 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +DROP TABLE wq_target CASCADE; +DROP TABLE wq_source; +-- test triggers +create or replace function merge_trigfunc () returns trigger +language plpgsql as +$$ +DECLARE + line text; +BEGIN + SELECT INTO line format('%s %s %s trigger%s', + TG_WHEN, TG_OP, TG_LEVEL, CASE + WHEN TG_OP = 'INSERT' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', NEW) + WHEN TG_OP = 'UPDATE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s -> %s', OLD, NEW) + WHEN TG_OP = 'DELETE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', OLD) + END); + + RAISE NOTICE '%', line; + IF (TG_WHEN = 'BEFORE' AND TG_LEVEL = 'ROW') THEN + IF (TG_OP = 'DELETE') THEN + RETURN OLD; + ELSE + RETURN NEW; + END IF; + ELSE + RETURN NULL; + END IF; +END; +$$; +CREATE TRIGGER merge_bsi BEFORE INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsu BEFORE UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsd BEFORE DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asi AFTER INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asu AFTER UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asd AFTER DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bri BEFORE INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bru BEFORE UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_brd BEFORE DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ari AFTER INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_aru AFTER UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ard AFTER DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +-- now the classic UPSERT, with a DELETE +BEGIN; +UPDATE target SET balance = 0 WHERE tid = 3; +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta +WHEN MATCHED THEN + DELETE +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- Test behavior of triggers that turn UPDATE/DELETE into no-ops +create or replace function skip_merge_op() returns trigger +language plpgsql as +$$ +BEGIN + RETURN NULL; +END; +$$; +SELECT * FROM target full outer join source on (sid = tid); +create trigger merge_skip BEFORE INSERT OR UPDATE or DELETE + ON target FOR EACH ROW EXECUTE FUNCTION skip_merge_op(); +DO $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN 
MATCHED AND s.sid = 3 THEN UPDATE SET balance = t.balance + s.delta +WHEN MATCHED THEN DELETE +WHEN NOT MATCHED THEN INSERT VALUES (sid, delta); +IF FOUND THEN + RAISE NOTICE 'Found'; +ELSE + RAISE NOTICE 'Not found'; +END IF; +GET DIAGNOSTICS result := ROW_COUNT; +RAISE NOTICE 'ROW_COUNT = %', result; +END; +$$; +SELECT * FROM target FULL OUTER JOIN source ON (sid = tid); +DROP TRIGGER merge_skip ON target; +DROP FUNCTION skip_merge_op(); +-- test from PL/pgSQL +-- make sure MERGE INTO isn't interpreted to mean returning variables like SELECT INTO +BEGIN; +DO LANGUAGE plpgsql $$ +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta; +END; +$$; +ROLLBACK; +--source constants +BEGIN; +MERGE INTO target t +USING (SELECT 9 AS sid, 57 AS delta) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +--source query +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta as newname FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.newname); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +--self-merge +BEGIN; +MERGE INTO target t1 +USING target t2 +ON t1.tid = t2.tid +WHEN MATCHED THEN + UPDATE SET balance = t1.balance + t2.balance +WHEN NOT MATCHED THEN + INSERT VALUES (t2.tid, t2.balance); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT tid as sid, balance as delta FROM target WHERE balance > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING +(SELECT sid, max(delta) AS delta + FROM source + GROUP BY sid + HAVING count(*) = 1 + ORDER BY sid ASC) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- plpgsql parameters and results +BEGIN; +CREATE FUNCTION merge_func (p_id integer, p_bal integer) +RETURNS INTEGER +LANGUAGE plpgsql +AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING (SELECT p_id AS sid) AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance - p_bal; +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(3, 4); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- PREPARE +BEGIN; +prepare foom as merge into target t using (select 1 as sid) s on (t.tid = s.sid) when matched then update set balance = 1; +psql:include/ts_merge_query.sql:685: ERROR: prepared statement "foom" already exists +execute foom; +psql:include/ts_merge_query.sql:686: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +BEGIN; +PREPARE foom2 (integer, integer) AS +MERGE INTO target t +USING (SELECT 1) s +ON t.tid = $1 +WHEN MATCHED THEN +UPDATE SET balance = $2; +psql:include/ts_merge_query.sql:695: ERROR: prepared statement "foom2" already exists +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +execute foom2 (1, 1); +psql:include/ts_merge_query.sql:697: ERROR: current transaction is aborted, commands ignored until end of transaction block 
+ROLLBACK; +-- subqueries in source relation +BEGIN; +MERGE INTO sq_target t +USING (SELECT * FROM sq_source) s +ON tid = sid +WHEN MATCHED AND t.balance > delta THEN + UPDATE SET balance = t.balance + delta; +SELECT * FROM sq_target ORDER BY tid; +ROLLBACK; +-- try a view +CREATE VIEW v AS SELECT * FROM sq_source WHERE sid < 2; +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = v.balance + delta; +SELECT * FROM sq_target ORDER BY tid; +ROLLBACK; +-- ambiguous reference to a column +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +psql:include/ts_merge_query.sql:732: ERROR: column reference "balance" is ambiguous +LINE 5: UPDATE SET balance = balance + delta + ^ +ROLLBACK; +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +SELECT * FROM sq_target; +ROLLBACK; +-- CTEs +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +WITH targq AS ( + SELECT * FROM v +) +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +ROLLBACK; +-- RETURNING +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE +RETURNING *; +psql:include/ts_merge_query.sql:778: ERROR: syntax error at or near "RETURNING" +LINE 10: RETURNING *; + ^ +ROLLBACK; +-- EXPLAIN +CREATE TABLE ex_mtarget (a int, b int) + WITH (autovacuum_enabled=off); +CREATE TABLE ex_msource (a int, b int) + WITH (autovacuum_enabled=off); +INSERT INTO ex_mtarget SELECT i, i*10 FROM generate_series(1,100,2) i; +INSERT INTO ex_msource SELECT i, i*10 FROM generate_series(1,100,1) i; +CREATE FUNCTION explain_merge(query text) RETURNS SETOF text +LANGUAGE plpgsql AS +$$ +DECLARE ln text; +BEGIN + FOR ln IN + EXECUTE 'explain (analyze, timing off, summary off, costs off) ' || + query + LOOP + ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g'); + RETURN NEXT ln; + END LOOP; +END; +$$; +-- only updates +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = t.b + 1'); +-- only updates to selected tuples +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1'); +-- updates + deletes +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 10 AND t.a <= 20 THEN + DELETE'); +-- only inserts +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN NOT MATCHED AND s.a < 10 THEN + INSERT VALUES (a, b)'); +-- all three +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 
10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 30 AND t.a <= 40 THEN + DELETE +WHEN NOT MATCHED AND s.a < 20 THEN + INSERT VALUES (a, b)'); +-- nothing +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a AND t.a < -1000 +WHEN MATCHED AND t.a < 10 THEN + DO NOTHING'); +DROP TABLE ex_msource, ex_mtarget; +DROP FUNCTION explain_merge(text); +-- Subqueries +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = (SELECT count(*) FROM sq_target); +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND (SELECT count(*) > 0 FROM sq_target) THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid AND (SELECT count(*) > 0 FROM sq_target) +WHEN MATCHED THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +DROP TABLE sq_target CASCADE; +DROP TABLE sq_source CASCADE; +CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 PARTITION OF pa_target FOR VALUES IN (1,4) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 PARTITION OF pa_target FOR VALUES IN (2,5,6) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 PARTITION OF pa_target FOR VALUES IN (3,8,9) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 PARTITION OF pa_target DEFAULT + WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid = 1 + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- try updating the partition key column +BEGIN; +CREATE FUNCTION merge_func() RETURNS integer LANGUAGE plpgsql AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_target CASCADE; +-- The target table is partitioned in the same way, but this time by attaching +-- partitions which have columns in different order, dropped columns etc. 
+CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 (balance float, tid integer, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 (extraid text, tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +ALTER TABLE part4 DROP COLUMN extraid; +ALTER TABLE pa_target ATTACH PARTITION part1 FOR VALUES IN (1,4); +ALTER TABLE pa_target ATTACH PARTITION part2 FOR VALUES IN (2,5,6); +ALTER TABLE pa_target ATTACH PARTITION part3 FOR VALUES IN (3,8,9); +ALTER TABLE pa_target ATTACH PARTITION part4 DEFAULT; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid IN (1, 5) + WHEN MATCHED AND tid % 5 = 0 THEN DELETE + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- try updating the partition key column +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- Sub-partitioning +CREATE TABLE pa_target (logts timestamp, tid integer, balance float, val text) + PARTITION BY RANGE (logts); +CREATE TABLE part_m01 PARTITION OF pa_target + FOR VALUES FROM ('2017-01-01') TO ('2017-02-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m01_odd PARTITION OF part_m01 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m01_even PARTITION OF part_m01 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02 PARTITION OF pa_target + FOR VALUES FROM ('2017-02-01') TO ('2017-03-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m02_odd PARTITION OF part_m02 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02_even PARTITION OF part_m02 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float) + WITH (autovacuum_enabled=off); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT '2017-01-31', id, id * 100, 'initial' FROM generate_series(1,9,3) AS id; +INSERT INTO pa_target SELECT '2017-02-28', id, id * 100, 'initial' FROM generate_series(2,9,3) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING (SELECT '2017-01-15' AS slogts, * FROM pa_source WHERE sid < 10) s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN 
+ INSERT VALUES (slogts::timestamp, sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- some complex joins on the source side +-- source relation is an unaliased join +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid1, delta, sval); +-- try accessing columns from either side of the source join +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta, sval) +WHEN MATCHED THEN + DELETE; +-- some simple expressions in INSERT targetlist +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta + scat, sval) +WHEN MATCHED THEN + UPDATE SET val = val || ' updated by merge'; +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN MATCHED THEN + UPDATE SET val = val || ' ' || delta::text; +SELECT * FROM cj_target ORDER BY tid; +ALTER TABLE cj_source1 RENAME COLUMN sid1 TO sid; +ALTER TABLE cj_source2 RENAME COLUMN sid2 TO sid; +TRUNCATE cj_target; +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON s1.sid = s2.sid +ON t.tid = s1.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s2.sid, delta, sval); +DROP TABLE cj_source2, cj_source1; +DROP TABLE cj_target CASCADE; +-- Function scans +MERGE INTO fs_target t +USING generate_series(1,100,1) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1); +MERGE INTO fs_target t +USING generate_series(1,100,2) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id, c = 'updated '|| id.*::text +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1, 'inserted ' || id.*::text); +SELECT count(*) FROM fs_target; +DROP TABLE fs_target CASCADE; +-- SERIALIZABLE test +-- handled in isolation tests +-- Inheritance-based partitioning +CREATE TABLE measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m02 ( + CHECK ( logdate >= DATE '2006-02-01' AND logdate < DATE '2006-03-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m03 ( + CHECK ( logdate >= DATE '2006-03-01' AND logdate < DATE '2006-04-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2007m01 ( + filler text, + peaktemp int, + logdate date not null, + city_id int not null, + unitsales int + CHECK ( logdate >= DATE '2007-01-01' AND logdate < DATE '2007-02-01') +) WITH (autovacuum_enabled=off); +ALTER TABLE measurement_y2007m01 DROP COLUMN filler; +ALTER TABLE measurement_y2007m01 INHERIT measurement; +INSERT INTO measurement VALUES (0, '2005-07-21', 5, 15); +CREATE OR REPLACE FUNCTION measurement_insert_trigger() +RETURNS TRIGGER AS $$ +BEGIN + IF ( NEW.logdate >= DATE '2006-02-01' AND + NEW.logdate < DATE '2006-03-01' ) THEN + INSERT INTO measurement_y2006m02 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2006-03-01' AND + NEW.logdate < DATE '2006-04-01' ) THEN + INSERT INTO measurement_y2006m03 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2007-01-01' AND + NEW.logdate < DATE '2007-02-01' ) THEN + INSERT INTO measurement_y2007m01 (city_id, logdate, peaktemp, unitsales) + VALUES (NEW.*); + ELSE + 
RAISE EXCEPTION 'Date out of range. Fix the measurement_insert_trigger() function!'; + END IF; + RETURN NULL; +END; +$$ LANGUAGE plpgsql ; +CREATE TRIGGER insert_measurement_trigger + BEFORE INSERT ON measurement + FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger(); +INSERT INTO measurement VALUES (1, '2006-02-10', 35, 10); +INSERT INTO measurement VALUES (1, '2006-02-16', 45, 20); +INSERT INTO measurement VALUES (1, '2006-03-17', 25, 10); +INSERT INTO measurement VALUES (1, '2006-03-27', 15, 40); +INSERT INTO measurement VALUES (1, '2007-01-15', 10, 10); +INSERT INTO measurement VALUES (1, '2007-01-17', 10, 10); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; +CREATE TABLE new_measurement (LIKE measurement) WITH (autovacuum_enabled=off); +INSERT INTO new_measurement VALUES (0, '2005-07-21', 25, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-01', 20, 10); +INSERT INTO new_measurement VALUES (1, '2006-02-16', 50, 10); +INSERT INTO new_measurement VALUES (2, '2006-02-10', 20, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-27', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-17', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-15', 5, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-16', 10, 10); +BEGIN; +MERGE INTO ONLY measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate, peaktemp; +ROLLBACK; +MERGE into measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; +BEGIN; +MERGE INTO new_measurement nm + USING ONLY measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; +ROLLBACK; +MERGE INTO new_measurement nm + USING measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; +DROP TABLE measurement, new_measurement CASCADE; +DROP FUNCTION measurement_insert_trigger(); +RESET SESSION AUTHORIZATION; +DROP TABLE target CASCADE; +DROP TABLE target2 CASCADE; +DROP TABLE source, source2; +DROP FUNCTION merge_trigfunc(); +REVOKE CREATE ON SCHEMA public FROM regress_merge_privs; +DROP USER regress_merge_privs; +DROP USER regress_merge_no_privs; +\o +:DIFF_CMD diff --git a/test/expected/ts_merge-15.out b/test/expected/ts_merge-15.out new file mode 100644 index 00000000000..5e0aa61ec92 --- /dev/null +++ b/test/expected/ts_merge-15.out @@ -0,0 +1,2551 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+\c :TEST_DBNAME :ROLE_SUPERUSER +\set ON_ERROR_STOP 0 +\set VERBOSITY default +SET client_min_messages TO error; +\set TEST_BASE_NAME ts_merge +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') AS "TEST_LOAD_NAME", + format('include/%s_load_ht.sql', :'TEST_BASE_NAME') AS "TEST_LOAD_HT_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') AS "TEST_QUERY_NAME", + format('%s/results/%s_results.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_WITH_HYPERTABLE", + format('%s/results/%s_ht_results.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_WITH_NO_HYPERTABLE" \gset +SELECT format('\! diff -u --label "Base pg table results" --label "Hypertable results" %s %s', :'TEST_RESULTS_WITH_HYPERTABLE', :'TEST_RESULTS_WITH_NO_HYPERTABLE') AS "DIFF_CMD" \gset +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +CREATE USER regress_merge_privs; +CREATE USER regress_merge_no_privs; +DROP TABLE IF EXISTS target; +DROP TABLE IF EXISTS source; +CREATE TABLE target (tid integer, balance integer) + WITH (autovacuum_enabled=off); +CREATE TABLE source (sid integer, delta integer) -- no index + WITH (autovacuum_enabled=off); +INSERT INTO target VALUES (1, 10); +INSERT INTO target VALUES (2, 20); +INSERT INTO target VALUES (3, 30); +SELECT t.ctid is not null as matched, t.*, s.* FROM source s FULL OUTER JOIN target t ON s.sid = t.tid ORDER BY t.tid, s.sid; + matched | tid | balance | sid | delta +---------+-----+---------+-----+------- + t | 1 | 10 | | + t | 2 | 20 | | + t | 3 | 30 | | +(3 rows) + +ALTER TABLE target OWNER TO regress_merge_privs; +ALTER TABLE source OWNER TO regress_merge_privs; +CREATE TABLE target2 (tid integer, balance integer) + WITH (autovacuum_enabled=off); +CREATE TABLE source2 (sid integer, delta integer) + WITH (autovacuum_enabled=off); +ALTER TABLE target2 OWNER TO regress_merge_no_privs; +ALTER TABLE source2 OWNER TO regress_merge_no_privs; +GRANT INSERT ON target TO regress_merge_no_privs; +GRANT CREATE ON SCHEMA public TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +CREATE TABLE sq_target (tid integer NOT NULL, balance integer) + WITH (autovacuum_enabled=off); +CREATE TABLE sq_source (delta integer, sid integer, balance integer DEFAULT 0) + WITH (autovacuum_enabled=off); +INSERT INTO sq_target(tid, balance) VALUES (1,100), (2,200), (3,300); +INSERT INTO sq_source(sid, delta) VALUES (1,10), (2,20), (4,40); +-- conditional WHEN clause +CREATE TABLE wq_target (tid integer not null, balance integer DEFAULT -1) + WITH (autovacuum_enabled=off); +CREATE TABLE wq_source (balance integer, sid integer) + WITH (autovacuum_enabled=off); +INSERT INTO wq_source (sid, balance) VALUES (1, 100); +CREATE TABLE cj_target (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE cj_source1 (sid1 integer, scat integer, delta integer) + WITH (autovacuum_enabled=off); +CREATE TABLE cj_source2 (sid2 integer, sval text) + WITH (autovacuum_enabled=off); +INSERT INTO cj_source1 VALUES (1, 10, 100); +INSERT INTO cj_source1 VALUES (1, 20, 200); +INSERT INTO cj_source1 VALUES (2, 20, 300); +INSERT INTO cj_source1 VALUES (3, 10, 400); +INSERT INTO cj_source2 VALUES (1, 'initial source2'); +INSERT INTO cj_source2 VALUES (2, 'initial source2'); +INSERT INTO cj_source2 VALUES (3, 'initial source2'); +CREATE TABLE fs_target (a int, b int, c text) + WITH
(autovacuum_enabled=off); +-- run tests on normal table +\o :TEST_RESULTS_WITH_NO_HYPERTABLE +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- +-- Errors +-- +MERGE INTO target t RANDOMWORD +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:12: ERROR: syntax error at or near "RANDOMWORD" +LINE 1: MERGE INTO target t RANDOMWORD + ^ +-- MATCHED/INSERT error +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:18: ERROR: syntax error at or near "INSERT" +LINE 5: INSERT DEFAULT VALUES; + ^ +-- incorrectly specifying INTO target +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT INTO target DEFAULT VALUES; +psql:include/ts_merge_query.sql:24: ERROR: syntax error at or near "INTO" +LINE 5: INSERT INTO target DEFAULT VALUES; + ^ +-- Multiple VALUES clause +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (1,1), (2,2); +psql:include/ts_merge_query.sql:30: ERROR: syntax error at or near "," +LINE 5: INSERT VALUES (1,1), (2,2); + ^ +-- SELECT query for INSERT +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT SELECT (1, 1); +psql:include/ts_merge_query.sql:36: ERROR: syntax error at or near "SELECT" +LINE 5: INSERT SELECT (1, 1); + ^ +-- NOT MATCHED/UPDATE +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:42: ERROR: syntax error at or near "UPDATE" +LINE 5: UPDATE SET balance = 0; + ^ +-- UPDATE tablename +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE target SET balance = 0; +psql:include/ts_merge_query.sql:48: ERROR: syntax error at or near "target" +LINE 5: UPDATE target SET balance = 0; + ^ +-- source and target names the same +MERGE INTO target +USING target +ON tid = tid +WHEN MATCHED THEN DO NOTHING; +psql:include/ts_merge_query.sql:53: ERROR: name "target" specified more than once +DETAIL: The name is used both as MERGE target table and data source. +-- used in a CTE +WITH foo AS ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) SELECT * FROM foo; +psql:include/ts_merge_query.sql:58: ERROR: MERGE not supported in WITH query +LINE 1: WITH foo AS ( + ^ +-- used in COPY +COPY ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) TO stdout; +psql:include/ts_merge_query.sql:63: ERROR: MERGE not supported in COPY +-- unsupported relation types +-- view +CREATE VIEW tv AS SELECT * FROM target; +MERGE INTO tv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:72: ERROR: cannot execute MERGE on relation "tv" +DETAIL: This operation is not supported for views. +DROP VIEW tv; +-- materialized view +CREATE MATERIALIZED VIEW mv AS SELECT * FROM target; +MERGE INTO mv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:81: ERROR: cannot execute MERGE on relation "mv" +DETAIL: This operation is not supported for materialized views. 
+DROP MATERIALIZED VIEW mv; +-- permissions +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:90: ERROR: permission denied for table source2 +GRANT INSERT ON target TO regress_merge_no_privs; +SET SESSION AUTHORIZATION regress_merge_no_privs; +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:99: ERROR: permission denied for table target +GRANT UPDATE ON target2 TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN MATCHED THEN + DELETE; +psql:include/ts_merge_query.sql:108: ERROR: permission denied for table target2 +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:114: ERROR: permission denied for table target2 +-- check if the target can be accessed from source relation subquery; we should +-- not be able to do so +MERGE INTO target t +USING (SELECT * FROM source WHERE t.tid > sid) s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:122: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 2: USING (SELECT * FROM source WHERE t.tid > sid) s + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- +-- initial tests +-- +-- zero rows in source has no effect +MERGE INTO target +USING source +ON target.tid = source.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +ROLLBACK; +-- insert some non-matching source rows to work from +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + DO NOTHING; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (5, 50); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- index plans +INSERT INTO target SELECT generate_series(1000,2500), 0; +ALTER TABLE target ADD PRIMARY KEY (tid); +ANALYZE target; +DELETE FROM target WHERE tid > 100; +ANALYZE target; +-- insert some matching source rows to work from +INSERT INTO source VALUES (2, 5); +INSERT INTO source VALUES (3, 20); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- equivalent of a DELETE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DO NOTHING; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED 
THEN + INSERT VALUES (4, NULL); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- duplicate source row causes multiple target row update ERROR +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:241: ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +psql:include/ts_merge_query.sql:249: ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +-- remove duplicate MATCHED data from source data +DELETE FROM source WHERE sid = 2; +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- duplicate source row on INSERT should fail because of target_pkey +INSERT INTO source VALUES (4, 40); +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, NULL); +psql:include/ts_merge_query.sql:265: ERROR: duplicate key value violates unique constraint "target_pkey" +DETAIL: Key (tid)=(4) already exists. +SELECT * FROM target ORDER BY tid; +psql:include/ts_merge_query.sql:266: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- remove duplicate NOT MATCHED data from source data +DELETE FROM source WHERE sid = 4; +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- remove constraints +alter table target drop CONSTRAINT target_pkey; +alter table target alter column tid drop not null; +-- multiple actions +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4) +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- should be equivalent +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0 +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- column references +-- do a simple equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- and again with duplicate source rows +INSERT INTO source VALUES (5, 50); +INSERT INTO source VALUES (5, 50); +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- removing duplicate source rows +DELETE FROM source WHERE sid = 5; +-- and again with explicitly identified column list +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- and again with a subtle error: referring to non-existent target row for NOT MATCHED +MERGE INTO target t +USING source AS s +ON 
t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +psql:include/ts_merge_query.sql:356: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- and again with a constant ON clause +BEGIN; +MERGE INTO target t +USING source AS s +ON (SELECT true) +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +psql:include/ts_merge_query.sql:364: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. +SELECT * FROM target ORDER BY tid; +psql:include/ts_merge_query.sql:365: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- now the classic UPSERT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- this time with a FALSE condition +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND FALSE THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +-- this time with an actual condition which returns false +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance <> 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +BEGIN; +-- and now with a condition which returns true +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +ROLLBACK; +-- conditions in the NOT MATCHED clause can only refer to source columns +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND t.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +psql:include/ts_merge_query.sql:408: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 3: WHEN NOT MATCHED AND t.balance = 100 THEN + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. 
+SELECT * FROM wq_target; +psql:include/ts_merge_query.sql:409: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +-- conditions in MATCHED clause can refer to both source and target +SELECT * FROM wq_source; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check if AND works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check if OR works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 199 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check source-side whole-row references +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON (t.tid = s.sid) +WHEN matched and t = s or t.tid = s.sid THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +ROLLBACK; +-- check if subqueries work in the conditions? 
+MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance > (SELECT max(balance) FROM target) THEN + UPDATE SET balance = t.balance + s.balance; +-- check if we can access system columns in the conditions +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.xmin = t.xmax THEN + UPDATE SET balance = t.balance + s.balance; +psql:include/ts_merge_query.sql:477: ERROR: cannot use system column "xmin" in MERGE WHEN condition +LINE 3: WHEN MATCHED AND t.xmin = t.xmax THEN + ^ +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.tableoid >= 0 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +DROP TABLE wq_target CASCADE; +DROP TABLE wq_source; +-- test triggers +create or replace function merge_trigfunc () returns trigger +language plpgsql as +$$ +DECLARE + line text; +BEGIN + SELECT INTO line format('%s %s %s trigger%s', + TG_WHEN, TG_OP, TG_LEVEL, CASE + WHEN TG_OP = 'INSERT' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', NEW) + WHEN TG_OP = 'UPDATE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s -> %s', OLD, NEW) + WHEN TG_OP = 'DELETE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', OLD) + END); + + RAISE NOTICE '%', line; + IF (TG_WHEN = 'BEFORE' AND TG_LEVEL = 'ROW') THEN + IF (TG_OP = 'DELETE') THEN + RETURN OLD; + ELSE + RETURN NEW; + END IF; + ELSE + RETURN NULL; + END IF; +END; +$$; +CREATE TRIGGER merge_bsi BEFORE INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsu BEFORE UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsd BEFORE DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asi AFTER INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asu AFTER UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asd AFTER DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bri BEFORE INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bru BEFORE UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_brd BEFORE DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ari AFTER INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_aru AFTER UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ard AFTER DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +-- now the classic UPSERT, with a DELETE +BEGIN; +UPDATE target SET balance = 0 WHERE tid = 3; +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta +WHEN MATCHED THEN + DELETE +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- Test behavior of triggers that turn UPDATE/DELETE into no-ops +create or replace function skip_merge_op() returns trigger +language plpgsql as +$$ +BEGIN + RETURN NULL; +END; +$$; +SELECT * FROM target full outer join source on (sid = tid); +create trigger merge_skip BEFORE INSERT OR UPDATE or DELETE + ON target FOR EACH ROW EXECUTE FUNCTION skip_merge_op(); +DO $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN 
MATCHED AND s.sid = 3 THEN UPDATE SET balance = t.balance + s.delta +WHEN MATCHED THEN DELETE +WHEN NOT MATCHED THEN INSERT VALUES (sid, delta); +IF FOUND THEN + RAISE NOTICE 'Found'; +ELSE + RAISE NOTICE 'Not found'; +END IF; +GET DIAGNOSTICS result := ROW_COUNT; +RAISE NOTICE 'ROW_COUNT = %', result; +END; +$$; +SELECT * FROM target FULL OUTER JOIN source ON (sid = tid); +DROP TRIGGER merge_skip ON target; +DROP FUNCTION skip_merge_op(); +-- test from PL/pgSQL +-- make sure MERGE INTO isn't interpreted to mean returning variables like SELECT INTO +BEGIN; +DO LANGUAGE plpgsql $$ +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta; +END; +$$; +ROLLBACK; +--source constants +BEGIN; +MERGE INTO target t +USING (SELECT 9 AS sid, 57 AS delta) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +--source query +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta as newname FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.newname); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +--self-merge +BEGIN; +MERGE INTO target t1 +USING target t2 +ON t1.tid = t2.tid +WHEN MATCHED THEN + UPDATE SET balance = t1.balance + t2.balance +WHEN NOT MATCHED THEN + INSERT VALUES (t2.tid, t2.balance); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT tid as sid, balance as delta FROM target WHERE balance > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING +(SELECT sid, max(delta) AS delta + FROM source + GROUP BY sid + HAVING count(*) = 1 + ORDER BY sid ASC) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- plpgsql parameters and results +BEGIN; +CREATE FUNCTION merge_func (p_id integer, p_bal integer) +RETURNS INTEGER +LANGUAGE plpgsql +AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING (SELECT p_id AS sid) AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance - p_bal; +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(3, 4); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- PREPARE +BEGIN; +prepare foom as merge into target t using (select 1 as sid) s on (t.tid = s.sid) when matched then update set balance = 1; +execute foom; +ROLLBACK; +BEGIN; +PREPARE foom2 (integer, integer) AS +MERGE INTO target t +USING (SELECT 1) s +ON t.tid = $1 +WHEN MATCHED THEN +UPDATE SET balance = $2; +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +execute foom2 (1, 1); +ROLLBACK; +-- subqueries in source relation +BEGIN; +MERGE INTO sq_target t +USING (SELECT * FROM sq_source) s +ON tid = sid +WHEN MATCHED AND t.balance > delta THEN + UPDATE SET balance = t.balance + delta; +SELECT * FROM sq_target ORDER BY tid; +ROLLBACK; +-- try a view +CREATE VIEW v AS SELECT * FROM sq_source WHERE sid < 2; +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = 
v.balance + delta; +SELECT * FROM sq_target ORDER BY tid; +ROLLBACK; +-- ambiguous reference to a column +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +psql:include/ts_merge_query.sql:732: ERROR: column reference "balance" is ambiguous +LINE 5: UPDATE SET balance = balance + delta + ^ +ROLLBACK; +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +SELECT * FROM sq_target; +ROLLBACK; +-- CTEs +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +WITH targq AS ( + SELECT * FROM v +) +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +ROLLBACK; +-- RETURNING +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE +RETURNING *; +psql:include/ts_merge_query.sql:778: ERROR: syntax error at or near "RETURNING" +LINE 10: RETURNING *; + ^ +ROLLBACK; +-- EXPLAIN +CREATE TABLE ex_mtarget (a int, b int) + WITH (autovacuum_enabled=off); +CREATE TABLE ex_msource (a int, b int) + WITH (autovacuum_enabled=off); +INSERT INTO ex_mtarget SELECT i, i*10 FROM generate_series(1,100,2) i; +INSERT INTO ex_msource SELECT i, i*10 FROM generate_series(1,100,1) i; +CREATE FUNCTION explain_merge(query text) RETURNS SETOF text +LANGUAGE plpgsql AS +$$ +DECLARE ln text; +BEGIN + FOR ln IN + EXECUTE 'explain (analyze, timing off, summary off, costs off) ' || + query + LOOP + ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g'); + RETURN NEXT ln; + END LOOP; +END; +$$; +-- only updates +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = t.b + 1'); +-- only updates to selected tuples +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1'); +-- updates + deletes +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 10 AND t.a <= 20 THEN + DELETE'); +-- only inserts +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN NOT MATCHED AND s.a < 10 THEN + INSERT VALUES (a, b)'); +-- all three +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 30 AND t.a <= 40 THEN + DELETE +WHEN NOT MATCHED AND s.a < 20 THEN + INSERT VALUES (a, b)'); +-- nothing +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a AND t.a < -1000 +WHEN MATCHED AND t.a < 10 THEN + DO NOTHING'); +DROP TABLE ex_msource, ex_mtarget; +DROP FUNCTION explain_merge(text); +-- Subqueries +BEGIN; +MERGE INTO sq_target t 
+USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = (SELECT count(*) FROM sq_target); +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND (SELECT count(*) > 0 FROM sq_target) THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid AND (SELECT count(*) > 0 FROM sq_target) +WHEN MATCHED THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +DROP TABLE sq_target CASCADE; +DROP TABLE sq_source CASCADE; +CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 PARTITION OF pa_target FOR VALUES IN (1,4) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 PARTITION OF pa_target FOR VALUES IN (2,5,6) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 PARTITION OF pa_target FOR VALUES IN (3,8,9) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 PARTITION OF pa_target DEFAULT + WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid = 1 + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- try updating the partition key column +BEGIN; +CREATE FUNCTION merge_func() RETURNS integer LANGUAGE plpgsql AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_target CASCADE; +-- The target table is partitioned in the same way, but this time by attaching +-- partitions which have columns in different order, dropped columns etc. 
+CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 (balance float, tid integer, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 (extraid text, tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +ALTER TABLE part4 DROP COLUMN extraid; +ALTER TABLE pa_target ATTACH PARTITION part1 FOR VALUES IN (1,4); +ALTER TABLE pa_target ATTACH PARTITION part2 FOR VALUES IN (2,5,6); +ALTER TABLE pa_target ATTACH PARTITION part3 FOR VALUES IN (3,8,9); +ALTER TABLE pa_target ATTACH PARTITION part4 DEFAULT; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid IN (1, 5) + WHEN MATCHED AND tid % 5 = 0 THEN DELETE + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- try updating the partition key column +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- Sub-partitioning +CREATE TABLE pa_target (logts timestamp, tid integer, balance float, val text) + PARTITION BY RANGE (logts); +CREATE TABLE part_m01 PARTITION OF pa_target + FOR VALUES FROM ('2017-01-01') TO ('2017-02-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m01_odd PARTITION OF part_m01 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m01_even PARTITION OF part_m01 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02 PARTITION OF pa_target + FOR VALUES FROM ('2017-02-01') TO ('2017-03-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m02_odd PARTITION OF part_m02 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02_even PARTITION OF part_m02 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float) + WITH (autovacuum_enabled=off); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT '2017-01-31', id, id * 100, 'initial' FROM generate_series(1,9,3) AS id; +INSERT INTO pa_target SELECT '2017-02-28', id, id * 100, 'initial' FROM generate_series(2,9,3) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING (SELECT '2017-01-15' AS slogts, * FROM pa_source WHERE sid < 10) s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN 
+ INSERT VALUES (slogts::timestamp, sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- some complex joins on the source side +-- source relation is an unaliased join +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid1, delta, sval); +-- try accessing columns from either side of the source join +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta, sval) +WHEN MATCHED THEN + DELETE; +-- some simple expressions in INSERT targetlist +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta + scat, sval) +WHEN MATCHED THEN + UPDATE SET val = val || ' updated by merge'; +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN MATCHED THEN + UPDATE SET val = val || ' ' || delta::text; +SELECT * FROM cj_target ORDER BY tid; +ALTER TABLE cj_source1 RENAME COLUMN sid1 TO sid; +ALTER TABLE cj_source2 RENAME COLUMN sid2 TO sid; +TRUNCATE cj_target; +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON s1.sid = s2.sid +ON t.tid = s1.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s2.sid, delta, sval); +DROP TABLE cj_source2, cj_source1; +DROP TABLE cj_target CASCADE; +-- Function scans +MERGE INTO fs_target t +USING generate_series(1,100,1) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1); +MERGE INTO fs_target t +USING generate_series(1,100,2) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id, c = 'updated '|| id.*::text +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1, 'inserted ' || id.*::text); +SELECT count(*) FROM fs_target; +DROP TABLE fs_target CASCADE; +-- SERIALIZABLE test +-- handled in isolation tests +-- Inheritance-based partitioning +CREATE TABLE measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m02 ( + CHECK ( logdate >= DATE '2006-02-01' AND logdate < DATE '2006-03-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m03 ( + CHECK ( logdate >= DATE '2006-03-01' AND logdate < DATE '2006-04-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2007m01 ( + filler text, + peaktemp int, + logdate date not null, + city_id int not null, + unitsales int + CHECK ( logdate >= DATE '2007-01-01' AND logdate < DATE '2007-02-01') +) WITH (autovacuum_enabled=off); +ALTER TABLE measurement_y2007m01 DROP COLUMN filler; +ALTER TABLE measurement_y2007m01 INHERIT measurement; +INSERT INTO measurement VALUES (0, '2005-07-21', 5, 15); +CREATE OR REPLACE FUNCTION measurement_insert_trigger() +RETURNS TRIGGER AS $$ +BEGIN + IF ( NEW.logdate >= DATE '2006-02-01' AND + NEW.logdate < DATE '2006-03-01' ) THEN + INSERT INTO measurement_y2006m02 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2006-03-01' AND + NEW.logdate < DATE '2006-04-01' ) THEN + INSERT INTO measurement_y2006m03 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2007-01-01' AND + NEW.logdate < DATE '2007-02-01' ) THEN + INSERT INTO measurement_y2007m01 (city_id, logdate, peaktemp, unitsales) + VALUES (NEW.*); + ELSE + 
RAISE EXCEPTION 'Date out of range. Fix the measurement_insert_trigger() function!'; + END IF; + RETURN NULL; +END; +$$ LANGUAGE plpgsql ; +CREATE TRIGGER insert_measurement_trigger + BEFORE INSERT ON measurement + FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger(); +INSERT INTO measurement VALUES (1, '2006-02-10', 35, 10); +INSERT INTO measurement VALUES (1, '2006-02-16', 45, 20); +INSERT INTO measurement VALUES (1, '2006-03-17', 25, 10); +INSERT INTO measurement VALUES (1, '2006-03-27', 15, 40); +INSERT INTO measurement VALUES (1, '2007-01-15', 10, 10); +INSERT INTO measurement VALUES (1, '2007-01-17', 10, 10); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; +CREATE TABLE new_measurement (LIKE measurement) WITH (autovacuum_enabled=off); +INSERT INTO new_measurement VALUES (0, '2005-07-21', 25, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-01', 20, 10); +INSERT INTO new_measurement VALUES (1, '2006-02-16', 50, 10); +INSERT INTO new_measurement VALUES (2, '2006-02-10', 20, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-27', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-17', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-15', 5, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-16', 10, 10); +BEGIN; +MERGE INTO ONLY measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate, peaktemp; +ROLLBACK; +MERGE into measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; +BEGIN; +MERGE INTO new_measurement nm + USING ONLY measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; +ROLLBACK; +MERGE INTO new_measurement nm + USING measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; +DROP TABLE measurement, new_measurement CASCADE; +DROP FUNCTION measurement_insert_trigger(); +RESET SESSION AUTHORIZATION; +DROP TABLE target CASCADE; +DROP TABLE target2 CASCADE; +DROP TABLE source, source2; +DROP FUNCTION merge_trigfunc(); +REVOKE CREATE ON SCHEMA public FROM regress_merge_privs; +DROP USER regress_merge_privs; +DROP USER regress_merge_no_privs; +\o +\ir :TEST_LOAD_HT_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+CREATE USER regress_merge_privs; +CREATE USER regress_merge_no_privs; +DROP TABLE IF EXISTS target; +DROP TABLE IF EXISTS source; +CREATE TABLE target (tid integer, balance integer) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('target', 'tid', chunk_time_interval => 3); + create_hypertable +--------------------- + (1,public,target,t) +(1 row) + +CREATE TABLE source (sid integer, delta integer) -- no index + WITH (autovacuum_enabled=off); +INSERT INTO target VALUES (1, 10); +INSERT INTO target VALUES (2, 20); +INSERT INTO target VALUES (3, 30); +SELECT t.ctid is not null as matched, t.*, s.* FROM source s FULL OUTER JOIN target t ON s.sid = t.tid ORDER BY t.tid, s.sid; + matched | tid | balance | sid | delta +---------+-----+---------+-----+------- + t | 1 | 10 | | + t | 2 | 20 | | + t | 3 | 30 | | +(3 rows) + +ALTER TABLE target OWNER TO regress_merge_privs; +ALTER TABLE source OWNER TO regress_merge_privs; +CREATE TABLE target2 (tid integer, balance integer) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('target2', 'tid', chunk_time_interval => 3); + create_hypertable +---------------------- + (2,public,target2,t) +(1 row) + +CREATE TABLE source2 (sid integer, delta integer) + WITH (autovacuum_enabled=off); +ALTER TABLE target2 OWNER TO regress_merge_no_privs; +ALTER TABLE source2 OWNER TO regress_merge_no_privs; +GRANT INSERT ON target TO regress_merge_no_privs; +GRANT CREATE ON SCHEMA public TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +CREATE TABLE sq_target (tid integer NOT NULL, balance integer) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('sq_target', 'tid', chunk_time_interval => 3); + create_hypertable +------------------------ + (3,public,sq_target,t) +(1 row) + +CREATE TABLE sq_source (delta integer, sid integer, balance integer DEFAULT 0) + WITH (autovacuum_enabled=off); +INSERT INTO sq_target(tid, balance) VALUES (1,100), (2,200), (3,300); +INSERT INTO sq_source(sid, delta) VALUES (1,10), (2,20), (4,40); +-- conditional WHEN clause +CREATE TABLE wq_target (tid integer not null, balance integer DEFAULT -1) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('wq_target', 'tid', chunk_time_interval => 3); + create_hypertable +------------------------ + (4,public,wq_target,t) +(1 row) + +CREATE TABLE wq_source (balance integer, sid integer) + WITH (autovacuum_enabled=off); +INSERT INTO wq_source (sid, balance) VALUES (1, 100); +-- some complex joins on the source side +CREATE TABLE cj_target (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('cj_target', 'tid', chunk_time_interval => 3); + create_hypertable +------------------------ + (5,public,cj_target,t) +(1 row) + +CREATE TABLE cj_source1 (sid1 integer, scat integer, delta integer) + WITH (autovacuum_enabled=off); +CREATE TABLE cj_source2 (sid2 integer, sval text) + WITH (autovacuum_enabled=off); +INSERT INTO cj_source1 VALUES (1, 10, 100); +INSERT INTO cj_source1 VALUES (1, 20, 200); +INSERT INTO cj_source1 VALUES (2, 20, 300); +INSERT INTO cj_source1 VALUES (3, 10, 400); +INSERT INTO cj_source2 VALUES (1, 'initial source2'); +INSERT INTO cj_source2 VALUES (2, 'initial source2'); +INSERT INTO cj_source2 VALUES (3, 'initial source2'); +CREATE TABLE fs_target (a int, b int, c text) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('fs_target', 'a', chunk_time_interval => 3); + create_hypertable +------------------------ + (6,public,fs_target,t) +(1 row) + +-- run tests on hypertable +\o 
:TEST_RESULTS_WITH_HYPERTABLE +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- +-- Errors +-- +MERGE INTO target t RANDOMWORD +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:12: ERROR: syntax error at or near "RANDOMWORD" +LINE 1: MERGE INTO target t RANDOMWORD + ^ +-- MATCHED/INSERT error +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:18: ERROR: syntax error at or near "INSERT" +LINE 5: INSERT DEFAULT VALUES; + ^ +-- incorrectly specifying INTO target +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT INTO target DEFAULT VALUES; +psql:include/ts_merge_query.sql:24: ERROR: syntax error at or near "INTO" +LINE 5: INSERT INTO target DEFAULT VALUES; + ^ +-- Multiple VALUES clause +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (1,1), (2,2); +psql:include/ts_merge_query.sql:30: ERROR: syntax error at or near "," +LINE 5: INSERT VALUES (1,1), (2,2); + ^ +-- SELECT query for INSERT +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT SELECT (1, 1); +psql:include/ts_merge_query.sql:36: ERROR: syntax error at or near "SELECT" +LINE 5: INSERT SELECT (1, 1); + ^ +-- NOT MATCHED/UPDATE +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:42: ERROR: syntax error at or near "UPDATE" +LINE 5: UPDATE SET balance = 0; + ^ +-- UPDATE tablename +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE target SET balance = 0; +psql:include/ts_merge_query.sql:48: ERROR: syntax error at or near "target" +LINE 5: UPDATE target SET balance = 0; + ^ +-- source and target names the same +MERGE INTO target +USING target +ON tid = tid +WHEN MATCHED THEN DO NOTHING; +psql:include/ts_merge_query.sql:53: ERROR: name "target" specified more than once +DETAIL: The name is used both as MERGE target table and data source. +-- used in a CTE +WITH foo AS ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) SELECT * FROM foo; +psql:include/ts_merge_query.sql:58: ERROR: MERGE not supported in WITH query +LINE 1: WITH foo AS ( + ^ +-- used in COPY +COPY ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) TO stdout; +psql:include/ts_merge_query.sql:63: ERROR: MERGE not supported in COPY +-- unsupported relation types +-- view +CREATE VIEW tv AS SELECT * FROM target; +MERGE INTO tv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:72: ERROR: cannot execute MERGE on relation "tv" +DETAIL: This operation is not supported for views. +DROP VIEW tv; +-- materialized view +CREATE MATERIALIZED VIEW mv AS SELECT * FROM target; +MERGE INTO mv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:81: ERROR: cannot execute MERGE on relation "mv" +DETAIL: This operation is not supported for materialized views. 
+DROP MATERIALIZED VIEW mv; +-- permissions +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:90: ERROR: permission denied for table source2 +GRANT INSERT ON target TO regress_merge_no_privs; +SET SESSION AUTHORIZATION regress_merge_no_privs; +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:99: ERROR: permission denied for table target +GRANT UPDATE ON target2 TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN MATCHED THEN + DELETE; +psql:include/ts_merge_query.sql:108: ERROR: permission denied for table target2 +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:114: ERROR: permission denied for table target2 +-- check if the target can be accessed from source relation subquery; we should +-- not be able to do so +MERGE INTO target t +USING (SELECT * FROM source WHERE t.tid > sid) s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:122: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 2: USING (SELECT * FROM source WHERE t.tid > sid) s + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- +-- initial tests +-- +-- zero rows in source has no effect +MERGE INTO target +USING source +ON target.tid = source.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +ROLLBACK; +-- insert some non-matching source rows to work from +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + DO NOTHING; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (5, 50); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- index plans +INSERT INTO target SELECT generate_series(1000,2500), 0; +ALTER TABLE target ADD PRIMARY KEY (tid); +ANALYZE target; +DELETE FROM target WHERE tid > 100; +ANALYZE target; +-- insert some matching source rows to work from +INSERT INTO source VALUES (2, 5); +INSERT INTO source VALUES (3, 20); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- equivalent of a DELETE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DO NOTHING; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED 
THEN + INSERT VALUES (4, NULL); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- duplicate source row causes multiple target row update ERROR +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:241: ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +psql:include/ts_merge_query.sql:249: ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +-- remove duplicate MATCHED data from source data +DELETE FROM source WHERE sid = 2; +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- duplicate source row on INSERT should fail because of target_pkey +INSERT INTO source VALUES (4, 40); +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, NULL); +psql:include/ts_merge_query.sql:265: ERROR: duplicate key value violates unique constraint "2_2_target_pkey" +DETAIL: Key (tid)=(4) already exists. +SELECT * FROM target ORDER BY tid; +psql:include/ts_merge_query.sql:266: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- remove duplicate NOT MATCHED data from source data +DELETE FROM source WHERE sid = 4; +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- remove constraints +alter table target drop CONSTRAINT target_pkey; +alter table target alter column tid drop not null; +psql:include/ts_merge_query.sql:277: ERROR: cannot drop not-null constraint from a time-partitioned column +-- multiple actions +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4) +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- should be equivalent +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0 +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- column references +-- do a simple equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- and again with duplicate source rows +INSERT INTO source VALUES (5, 50); +INSERT INTO source VALUES (5, 50); +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- removing duplicate source rows +DELETE FROM source WHERE sid = 5; +-- and again with explicitly identified column list +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- and again with a 
subtle error: referring to non-existent target row for NOT MATCHED +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +psql:include/ts_merge_query.sql:356: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- and again with a constant ON clause +BEGIN; +MERGE INTO target t +USING source AS s +ON (SELECT true) +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +psql:include/ts_merge_query.sql:364: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. +SELECT * FROM target ORDER BY tid; +psql:include/ts_merge_query.sql:365: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- now the classic UPSERT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- this time with a FALSE condition +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND FALSE THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +-- this time with an actual condition which returns false +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance <> 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +BEGIN; +-- and now with a condition which returns true +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +ROLLBACK; +-- conditions in the NOT MATCHED clause can only refer to source columns +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND t.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +psql:include/ts_merge_query.sql:408: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 3: WHEN NOT MATCHED AND t.balance = 100 THEN + ^ +HINT: There is an entry for table "t", but it cannot be referenced from this part of the query. 
+SELECT * FROM wq_target; +psql:include/ts_merge_query.sql:409: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +-- conditions in MATCHED clause can refer to both source and target +SELECT * FROM wq_source; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check if AND works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check if OR works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 199 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check source-side whole-row references +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON (t.tid = s.sid) +WHEN matched and t = s or t.tid = s.sid THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +ROLLBACK; +-- check if subqueries work in the conditions? 
+MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance > (SELECT max(balance) FROM target) THEN + UPDATE SET balance = t.balance + s.balance; +-- check if we can access system columns in the conditions +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.xmin = t.xmax THEN + UPDATE SET balance = t.balance + s.balance; +psql:include/ts_merge_query.sql:477: ERROR: cannot use system column "xmin" in MERGE WHEN condition +LINE 3: WHEN MATCHED AND t.xmin = t.xmax THEN + ^ +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.tableoid >= 0 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +DROP TABLE wq_target CASCADE; +DROP TABLE wq_source; +-- test triggers +create or replace function merge_trigfunc () returns trigger +language plpgsql as +$$ +DECLARE + line text; +BEGIN + SELECT INTO line format('%s %s %s trigger%s', + TG_WHEN, TG_OP, TG_LEVEL, CASE + WHEN TG_OP = 'INSERT' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', NEW) + WHEN TG_OP = 'UPDATE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s -> %s', OLD, NEW) + WHEN TG_OP = 'DELETE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', OLD) + END); + + RAISE NOTICE '%', line; + IF (TG_WHEN = 'BEFORE' AND TG_LEVEL = 'ROW') THEN + IF (TG_OP = 'DELETE') THEN + RETURN OLD; + ELSE + RETURN NEW; + END IF; + ELSE + RETURN NULL; + END IF; +END; +$$; +CREATE TRIGGER merge_bsi BEFORE INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsu BEFORE UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsd BEFORE DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asi AFTER INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asu AFTER UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asd AFTER DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bri BEFORE INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bru BEFORE UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_brd BEFORE DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ari AFTER INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_aru AFTER UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ard AFTER DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +-- now the classic UPSERT, with a DELETE +BEGIN; +UPDATE target SET balance = 0 WHERE tid = 3; +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta +WHEN MATCHED THEN + DELETE +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- Test behavior of triggers that turn UPDATE/DELETE into no-ops +create or replace function skip_merge_op() returns trigger +language plpgsql as +$$ +BEGIN + RETURN NULL; +END; +$$; +SELECT * FROM target full outer join source on (sid = tid); +create trigger merge_skip BEFORE INSERT OR UPDATE or DELETE + ON target FOR EACH ROW EXECUTE FUNCTION skip_merge_op(); +DO $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN 
MATCHED AND s.sid = 3 THEN UPDATE SET balance = t.balance + s.delta +WHEN MATCHED THEN DELETE +WHEN NOT MATCHED THEN INSERT VALUES (sid, delta); +IF FOUND THEN + RAISE NOTICE 'Found'; +ELSE + RAISE NOTICE 'Not found'; +END IF; +GET DIAGNOSTICS result := ROW_COUNT; +RAISE NOTICE 'ROW_COUNT = %', result; +END; +$$; +SELECT * FROM target FULL OUTER JOIN source ON (sid = tid); +DROP TRIGGER merge_skip ON target; +DROP FUNCTION skip_merge_op(); +-- test from PL/pgSQL +-- make sure MERGE INTO isn't interpreted to mean returning variables like SELECT INTO +BEGIN; +DO LANGUAGE plpgsql $$ +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta; +END; +$$; +ROLLBACK; +--source constants +BEGIN; +MERGE INTO target t +USING (SELECT 9 AS sid, 57 AS delta) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +--source query +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta as newname FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.newname); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +--self-merge +BEGIN; +MERGE INTO target t1 +USING target t2 +ON t1.tid = t2.tid +WHEN MATCHED THEN + UPDATE SET balance = t1.balance + t2.balance +WHEN NOT MATCHED THEN + INSERT VALUES (t2.tid, t2.balance); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT tid as sid, balance as delta FROM target WHERE balance > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING +(SELECT sid, max(delta) AS delta + FROM source + GROUP BY sid + HAVING count(*) = 1 + ORDER BY sid ASC) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- plpgsql parameters and results +BEGIN; +CREATE FUNCTION merge_func (p_id integer, p_bal integer) +RETURNS INTEGER +LANGUAGE plpgsql +AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING (SELECT p_id AS sid) AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance - p_bal; +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(3, 4); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- PREPARE +BEGIN; +prepare foom as merge into target t using (select 1 as sid) s on (t.tid = s.sid) when matched then update set balance = 1; +psql:include/ts_merge_query.sql:685: ERROR: prepared statement "foom" already exists +execute foom; +psql:include/ts_merge_query.sql:686: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +BEGIN; +PREPARE foom2 (integer, integer) AS +MERGE INTO target t +USING (SELECT 1) s +ON t.tid = $1 +WHEN MATCHED THEN +UPDATE SET balance = $2; +psql:include/ts_merge_query.sql:695: ERROR: prepared statement "foom2" already exists +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +execute foom2 (1, 1); +psql:include/ts_merge_query.sql:697: ERROR: current transaction is aborted, commands ignored until end of transaction block 
+ROLLBACK; +-- subqueries in source relation +BEGIN; +MERGE INTO sq_target t +USING (SELECT * FROM sq_source) s +ON tid = sid +WHEN MATCHED AND t.balance > delta THEN + UPDATE SET balance = t.balance + delta; +SELECT * FROM sq_target ORDER BY tid; +ROLLBACK; +-- try a view +CREATE VIEW v AS SELECT * FROM sq_source WHERE sid < 2; +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = v.balance + delta; +SELECT * FROM sq_target ORDER BY tid; +ROLLBACK; +-- ambiguous reference to a column +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +psql:include/ts_merge_query.sql:732: ERROR: column reference "balance" is ambiguous +LINE 5: UPDATE SET balance = balance + delta + ^ +ROLLBACK; +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +SELECT * FROM sq_target; +ROLLBACK; +-- CTEs +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +WITH targq AS ( + SELECT * FROM v +) +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +ROLLBACK; +-- RETURNING +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE +RETURNING *; +psql:include/ts_merge_query.sql:778: ERROR: syntax error at or near "RETURNING" +LINE 10: RETURNING *; + ^ +ROLLBACK; +-- EXPLAIN +CREATE TABLE ex_mtarget (a int, b int) + WITH (autovacuum_enabled=off); +CREATE TABLE ex_msource (a int, b int) + WITH (autovacuum_enabled=off); +INSERT INTO ex_mtarget SELECT i, i*10 FROM generate_series(1,100,2) i; +INSERT INTO ex_msource SELECT i, i*10 FROM generate_series(1,100,1) i; +CREATE FUNCTION explain_merge(query text) RETURNS SETOF text +LANGUAGE plpgsql AS +$$ +DECLARE ln text; +BEGIN + FOR ln IN + EXECUTE 'explain (analyze, timing off, summary off, costs off) ' || + query + LOOP + ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g'); + RETURN NEXT ln; + END LOOP; +END; +$$; +-- only updates +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = t.b + 1'); +-- only updates to selected tuples +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1'); +-- updates + deletes +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 10 AND t.a <= 20 THEN + DELETE'); +-- only inserts +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN NOT MATCHED AND s.a < 10 THEN + INSERT VALUES (a, b)'); +-- all three +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 
10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 30 AND t.a <= 40 THEN + DELETE +WHEN NOT MATCHED AND s.a < 20 THEN + INSERT VALUES (a, b)'); +-- nothing +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a AND t.a < -1000 +WHEN MATCHED AND t.a < 10 THEN + DO NOTHING'); +DROP TABLE ex_msource, ex_mtarget; +DROP FUNCTION explain_merge(text); +-- Subqueries +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = (SELECT count(*) FROM sq_target); +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND (SELECT count(*) > 0 FROM sq_target) THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid AND (SELECT count(*) > 0 FROM sq_target) +WHEN MATCHED THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +DROP TABLE sq_target CASCADE; +DROP TABLE sq_source CASCADE; +CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 PARTITION OF pa_target FOR VALUES IN (1,4) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 PARTITION OF pa_target FOR VALUES IN (2,5,6) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 PARTITION OF pa_target FOR VALUES IN (3,8,9) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 PARTITION OF pa_target DEFAULT + WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid = 1 + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- try updating the partition key column +BEGIN; +CREATE FUNCTION merge_func() RETURNS integer LANGUAGE plpgsql AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_target CASCADE; +-- The target table is partitioned in the same way, but this time by attaching +-- partitions which have columns in different order, dropped columns etc. 
+CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 (balance float, tid integer, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 (extraid text, tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +ALTER TABLE part4 DROP COLUMN extraid; +ALTER TABLE pa_target ATTACH PARTITION part1 FOR VALUES IN (1,4); +ALTER TABLE pa_target ATTACH PARTITION part2 FOR VALUES IN (2,5,6); +ALTER TABLE pa_target ATTACH PARTITION part3 FOR VALUES IN (3,8,9); +ALTER TABLE pa_target ATTACH PARTITION part4 DEFAULT; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid IN (1, 5) + WHEN MATCHED AND tid % 5 = 0 THEN DELETE + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- try updating the partition key column +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- Sub-partitioning +CREATE TABLE pa_target (logts timestamp, tid integer, balance float, val text) + PARTITION BY RANGE (logts); +CREATE TABLE part_m01 PARTITION OF pa_target + FOR VALUES FROM ('2017-01-01') TO ('2017-02-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m01_odd PARTITION OF part_m01 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m01_even PARTITION OF part_m01 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02 PARTITION OF pa_target + FOR VALUES FROM ('2017-02-01') TO ('2017-03-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m02_odd PARTITION OF part_m02 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02_even PARTITION OF part_m02 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float) + WITH (autovacuum_enabled=off); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT '2017-01-31', id, id * 100, 'initial' FROM generate_series(1,9,3) AS id; +INSERT INTO pa_target SELECT '2017-02-28', id, id * 100, 'initial' FROM generate_series(2,9,3) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING (SELECT '2017-01-15' AS slogts, * FROM pa_source WHERE sid < 10) s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN 
+ INSERT VALUES (slogts::timestamp, sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- some complex joins on the source side +-- source relation is an unaliased join +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid1, delta, sval); +-- try accessing columns from either side of the source join +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta, sval) +WHEN MATCHED THEN + DELETE; +-- some simple expressions in INSERT targetlist +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta + scat, sval) +WHEN MATCHED THEN + UPDATE SET val = val || ' updated by merge'; +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN MATCHED THEN + UPDATE SET val = val || ' ' || delta::text; +SELECT * FROM cj_target ORDER BY tid; +ALTER TABLE cj_source1 RENAME COLUMN sid1 TO sid; +ALTER TABLE cj_source2 RENAME COLUMN sid2 TO sid; +TRUNCATE cj_target; +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON s1.sid = s2.sid +ON t.tid = s1.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s2.sid, delta, sval); +DROP TABLE cj_source2, cj_source1; +DROP TABLE cj_target CASCADE; +-- Function scans +MERGE INTO fs_target t +USING generate_series(1,100,1) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1); +MERGE INTO fs_target t +USING generate_series(1,100,2) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id, c = 'updated '|| id.*::text +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1, 'inserted ' || id.*::text); +SELECT count(*) FROM fs_target; +DROP TABLE fs_target CASCADE; +-- SERIALIZABLE test +-- handled in isolation tests +-- Inheritance-based partitioning +CREATE TABLE measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m02 ( + CHECK ( logdate >= DATE '2006-02-01' AND logdate < DATE '2006-03-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m03 ( + CHECK ( logdate >= DATE '2006-03-01' AND logdate < DATE '2006-04-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2007m01 ( + filler text, + peaktemp int, + logdate date not null, + city_id int not null, + unitsales int + CHECK ( logdate >= DATE '2007-01-01' AND logdate < DATE '2007-02-01') +) WITH (autovacuum_enabled=off); +ALTER TABLE measurement_y2007m01 DROP COLUMN filler; +ALTER TABLE measurement_y2007m01 INHERIT measurement; +INSERT INTO measurement VALUES (0, '2005-07-21', 5, 15); +CREATE OR REPLACE FUNCTION measurement_insert_trigger() +RETURNS TRIGGER AS $$ +BEGIN + IF ( NEW.logdate >= DATE '2006-02-01' AND + NEW.logdate < DATE '2006-03-01' ) THEN + INSERT INTO measurement_y2006m02 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2006-03-01' AND + NEW.logdate < DATE '2006-04-01' ) THEN + INSERT INTO measurement_y2006m03 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2007-01-01' AND + NEW.logdate < DATE '2007-02-01' ) THEN + INSERT INTO measurement_y2007m01 (city_id, logdate, peaktemp, unitsales) + VALUES (NEW.*); + ELSE + 
RAISE EXCEPTION 'Date out of range. Fix the measurement_insert_trigger() function!'; + END IF; + RETURN NULL; +END; +$$ LANGUAGE plpgsql ; +CREATE TRIGGER insert_measurement_trigger + BEFORE INSERT ON measurement + FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger(); +INSERT INTO measurement VALUES (1, '2006-02-10', 35, 10); +INSERT INTO measurement VALUES (1, '2006-02-16', 45, 20); +INSERT INTO measurement VALUES (1, '2006-03-17', 25, 10); +INSERT INTO measurement VALUES (1, '2006-03-27', 15, 40); +INSERT INTO measurement VALUES (1, '2007-01-15', 10, 10); +INSERT INTO measurement VALUES (1, '2007-01-17', 10, 10); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; +CREATE TABLE new_measurement (LIKE measurement) WITH (autovacuum_enabled=off); +INSERT INTO new_measurement VALUES (0, '2005-07-21', 25, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-01', 20, 10); +INSERT INTO new_measurement VALUES (1, '2006-02-16', 50, 10); +INSERT INTO new_measurement VALUES (2, '2006-02-10', 20, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-27', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-17', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-15', 5, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-16', 10, 10); +BEGIN; +MERGE INTO ONLY measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate, peaktemp; +ROLLBACK; +MERGE into measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; +BEGIN; +MERGE INTO new_measurement nm + USING ONLY measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; +ROLLBACK; +MERGE INTO new_measurement nm + USING measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; +DROP TABLE measurement, new_measurement CASCADE; +DROP FUNCTION measurement_insert_trigger(); +RESET SESSION AUTHORIZATION; +DROP TABLE target CASCADE; +DROP TABLE target2 CASCADE; +DROP TABLE source, source2; +DROP FUNCTION merge_trigfunc(); +REVOKE CREATE ON SCHEMA public FROM regress_merge_privs; +DROP USER regress_merge_privs; +DROP USER regress_merge_no_privs; +\o +:DIFF_CMD diff --git a/test/expected/ts_merge-16.out b/test/expected/ts_merge-16.out new file mode 100644 index 00000000000..b06dbb05aae --- /dev/null +++ b/test/expected/ts_merge-16.out @@ -0,0 +1,2551 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+\c :TEST_DBNAME :ROLE_SUPERUSER +\set ON_ERROR_STOP 0 +\set VERBOSITY default +SET client_min_messages TO error; +\set TEST_BASE_NAME ts_merge +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') AS "TEST_LOAD_NAME", + format('include/%s_load_ht.sql', :'TEST_BASE_NAME') AS "TEST_LOAD_HT_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') AS "TEST_QUERY_NAME", + format('%s/results/%s_results.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_WITH_HYPERTABLE", + format('%s/results/%s_ht_results.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_RESULTS_WITH_NO_HYPERTABLE" \gset +SELECT format('\! diff -u --label "Base pg table results" --label "Hyperatable results" %s %s', :'TEST_RESULTS_WITH_HYPERTABLE', :'TEST_RESULTS_WITH_NO_HYPERTABLE') AS "DIFF_CMD" \gset +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +CREATE USER regress_merge_privs; +CREATE USER regress_merge_no_privs; +DROP TABLE IF EXISTS target; +DROP TABLE IF EXISTS source; +CREATE TABLE target (tid integer, balance integer) + WITH (autovacuum_enabled=off); +CREATE TABLE source (sid integer, delta integer) -- no index + WITH (autovacuum_enabled=off); +INSERT INTO target VALUES (1, 10); +INSERT INTO target VALUES (2, 20); +INSERT INTO target VALUES (3, 30); +SELECT t.ctid is not null as matched, t.*, s.* FROM source s FULL OUTER JOIN target t ON s.sid = t.tid ORDER BY t.tid, s.sid; + matched | tid | balance | sid | delta +---------+-----+---------+-----+------- + t | 1 | 10 | | + t | 2 | 20 | | + t | 3 | 30 | | +(3 rows) + +ALTER TABLE target OWNER TO regress_merge_privs; +ALTER TABLE source OWNER TO regress_merge_privs; +CREATE TABLE target2 (tid integer, balance integer) + WITH (autovacuum_enabled=off); +CREATE TABLE source2 (sid integer, delta integer) + WITH (autovacuum_enabled=off); +ALTER TABLE target2 OWNER TO regress_merge_no_privs; +ALTER TABLE source2 OWNER TO regress_merge_no_privs; +GRANT INSERT ON target TO regress_merge_no_privs; +GRANT CREATE ON SCHEMA public TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +CREATE TABLE sq_target (tid integer NOT NULL, balance integer) + WITH (autovacuum_enabled=off); +CREATE TABLE sq_source (delta integer, sid integer, balance integer DEFAULT 0) + WITH (autovacuum_enabled=off); +INSERT INTO sq_target(tid, balance) VALUES (1,100), (2,200), (3,300); +INSERT INTO sq_source(sid, delta) VALUES (1,10), (2,20), (4,40); +-- conditional WHEN clause +CREATE TABLE wq_target (tid integer not null, balance integer DEFAULT -1) + WITH (autovacuum_enabled=off); +CREATE TABLE wq_source (balance integer, sid integer) + WITH (autovacuum_enabled=off); +INSERT INTO wq_source (sid, balance) VALUES (1, 100); +CREATE TABLE cj_target (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE cj_source1 (sid1 integer, scat integer, delta integer) + WITH (autovacuum_enabled=off); +CREATE TABLE cj_source2 (sid2 integer, sval text) + WITH (autovacuum_enabled=off); +INSERT INTO cj_source1 VALUES (1, 10, 100); +INSERT INTO cj_source1 VALUES (1, 20, 200); +INSERT INTO cj_source1 VALUES (2, 20, 300); +INSERT INTO cj_source1 VALUES (3, 10, 400); +INSERT INTO cj_source2 VALUES (1, 'initial source2'); +INSERT INTO cj_source2 VALUES (2, 'initial source2'); +INSERT INTO cj_source2 VALUES (3, 'initial source2'); +CREATE TABLE fs_target (a int, b int, c text) + WITH 
(autovacuum_enabled=off); +-- run tests on normal table +\o :TEST_RESULTS_WITH_NO_HYPERTABLE +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- +-- Errors +-- +MERGE INTO target t RANDOMWORD +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:12: ERROR: syntax error at or near "RANDOMWORD" +LINE 1: MERGE INTO target t RANDOMWORD + ^ +-- MATCHED/INSERT error +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:18: ERROR: syntax error at or near "INSERT" +LINE 5: INSERT DEFAULT VALUES; + ^ +-- incorrectly specifying INTO target +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT INTO target DEFAULT VALUES; +psql:include/ts_merge_query.sql:24: ERROR: syntax error at or near "INTO" +LINE 5: INSERT INTO target DEFAULT VALUES; + ^ +-- Multiple VALUES clause +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (1,1), (2,2); +psql:include/ts_merge_query.sql:30: ERROR: syntax error at or near "," +LINE 5: INSERT VALUES (1,1), (2,2); + ^ +-- SELECT query for INSERT +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT SELECT (1, 1); +psql:include/ts_merge_query.sql:36: ERROR: syntax error at or near "SELECT" +LINE 5: INSERT SELECT (1, 1); + ^ +-- NOT MATCHED/UPDATE +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:42: ERROR: syntax error at or near "UPDATE" +LINE 5: UPDATE SET balance = 0; + ^ +-- UPDATE tablename +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE target SET balance = 0; +psql:include/ts_merge_query.sql:48: ERROR: syntax error at or near "target" +LINE 5: UPDATE target SET balance = 0; + ^ +-- source and target names the same +MERGE INTO target +USING target +ON tid = tid +WHEN MATCHED THEN DO NOTHING; +psql:include/ts_merge_query.sql:53: ERROR: name "target" specified more than once +DETAIL: The name is used both as MERGE target table and data source. +-- used in a CTE +WITH foo AS ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) SELECT * FROM foo; +psql:include/ts_merge_query.sql:58: ERROR: MERGE not supported in WITH query +LINE 1: WITH foo AS ( + ^ +-- used in COPY +COPY ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) TO stdout; +psql:include/ts_merge_query.sql:63: ERROR: MERGE not supported in COPY +-- unsupported relation types +-- view +CREATE VIEW tv AS SELECT * FROM target; +MERGE INTO tv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:72: ERROR: cannot execute MERGE on relation "tv" +DETAIL: This operation is not supported for views. +DROP VIEW tv; +-- materialized view +CREATE MATERIALIZED VIEW mv AS SELECT * FROM target; +MERGE INTO mv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:81: ERROR: cannot execute MERGE on relation "mv" +DETAIL: This operation is not supported for materialized views. 
+DROP MATERIALIZED VIEW mv; +-- permissions +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:90: ERROR: permission denied for table source2 +GRANT INSERT ON target TO regress_merge_no_privs; +SET SESSION AUTHORIZATION regress_merge_no_privs; +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:99: ERROR: permission denied for table target +GRANT UPDATE ON target2 TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN MATCHED THEN + DELETE; +psql:include/ts_merge_query.sql:108: ERROR: permission denied for table target2 +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:114: ERROR: permission denied for table target2 +-- check if the target can be accessed from source relation subquery; we should +-- not be able to do so +MERGE INTO target t +USING (SELECT * FROM source WHERE t.tid > sid) s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:122: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 2: USING (SELECT * FROM source WHERE t.tid > sid) s + ^ +DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- +-- initial tests +-- +-- zero rows in source has no effect +MERGE INTO target +USING source +ON target.tid = source.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +ROLLBACK; +-- insert some non-matching source rows to work from +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + DO NOTHING; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (5, 50); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- index plans +INSERT INTO target SELECT generate_series(1000,2500), 0; +ALTER TABLE target ADD PRIMARY KEY (tid); +ANALYZE target; +DELETE FROM target WHERE tid > 100; +ANALYZE target; +-- insert some matching source rows to work from +INSERT INTO source VALUES (2, 5); +INSERT INTO source VALUES (3, 20); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- equivalent of a DELETE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DO NOTHING; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED 
THEN + INSERT VALUES (4, NULL); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- duplicate source row causes multiple target row update ERROR +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:241: ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +psql:include/ts_merge_query.sql:249: ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +-- remove duplicate MATCHED data from source data +DELETE FROM source WHERE sid = 2; +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- duplicate source row on INSERT should fail because of target_pkey +INSERT INTO source VALUES (4, 40); +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, NULL); +psql:include/ts_merge_query.sql:265: ERROR: duplicate key value violates unique constraint "target_pkey" +DETAIL: Key (tid)=(4) already exists. +SELECT * FROM target ORDER BY tid; +psql:include/ts_merge_query.sql:266: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- remove duplicate NOT MATCHED data from source data +DELETE FROM source WHERE sid = 4; +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- remove constraints +alter table target drop CONSTRAINT target_pkey; +alter table target alter column tid drop not null; +-- multiple actions +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4) +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- should be equivalent +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0 +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- column references +-- do a simple equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- and again with duplicate source rows +INSERT INTO source VALUES (5, 50); +INSERT INTO source VALUES (5, 50); +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- removing duplicate source rows +DELETE FROM source WHERE sid = 5; +-- and again with explicitly identified column list +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- and again with a subtle error: referring to non-existent target row for NOT MATCHED +MERGE INTO target t +USING source AS s +ON 
t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +psql:include/ts_merge_query.sql:356: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- and again with a constant ON clause +BEGIN; +MERGE INTO target t +USING source AS s +ON (SELECT true) +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +psql:include/ts_merge_query.sql:364: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. +SELECT * FROM target ORDER BY tid; +psql:include/ts_merge_query.sql:365: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- now the classic UPSERT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- this time with a FALSE condition +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND FALSE THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +-- this time with an actual condition which returns false +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance <> 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +BEGIN; +-- and now with a condition which returns true +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +ROLLBACK; +-- conditions in the NOT MATCHED clause can only refer to source columns +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND t.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +psql:include/ts_merge_query.sql:408: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 3: WHEN NOT MATCHED AND t.balance = 100 THEN + ^ +DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. 
+SELECT * FROM wq_target; +psql:include/ts_merge_query.sql:409: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +-- conditions in MATCHED clause can refer to both source and target +SELECT * FROM wq_source; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check if AND works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check if OR works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 199 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check source-side whole-row references +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON (t.tid = s.sid) +WHEN matched and t = s or t.tid = s.sid THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +ROLLBACK; +-- check if subqueries work in the conditions? 
+MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance > (SELECT max(balance) FROM target) THEN + UPDATE SET balance = t.balance + s.balance; +-- check if we can access system columns in the conditions +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.xmin = t.xmax THEN + UPDATE SET balance = t.balance + s.balance; +psql:include/ts_merge_query.sql:477: ERROR: cannot use system column "xmin" in MERGE WHEN condition +LINE 3: WHEN MATCHED AND t.xmin = t.xmax THEN + ^ +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.tableoid >= 0 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +DROP TABLE wq_target CASCADE; +DROP TABLE wq_source; +-- test triggers +create or replace function merge_trigfunc () returns trigger +language plpgsql as +$$ +DECLARE + line text; +BEGIN + SELECT INTO line format('%s %s %s trigger%s', + TG_WHEN, TG_OP, TG_LEVEL, CASE + WHEN TG_OP = 'INSERT' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', NEW) + WHEN TG_OP = 'UPDATE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s -> %s', OLD, NEW) + WHEN TG_OP = 'DELETE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', OLD) + END); + + RAISE NOTICE '%', line; + IF (TG_WHEN = 'BEFORE' AND TG_LEVEL = 'ROW') THEN + IF (TG_OP = 'DELETE') THEN + RETURN OLD; + ELSE + RETURN NEW; + END IF; + ELSE + RETURN NULL; + END IF; +END; +$$; +CREATE TRIGGER merge_bsi BEFORE INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsu BEFORE UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsd BEFORE DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asi AFTER INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asu AFTER UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asd AFTER DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bri BEFORE INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bru BEFORE UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_brd BEFORE DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ari AFTER INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_aru AFTER UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ard AFTER DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +-- now the classic UPSERT, with a DELETE +BEGIN; +UPDATE target SET balance = 0 WHERE tid = 3; +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta +WHEN MATCHED THEN + DELETE +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- Test behavior of triggers that turn UPDATE/DELETE into no-ops +create or replace function skip_merge_op() returns trigger +language plpgsql as +$$ +BEGIN + RETURN NULL; +END; +$$; +SELECT * FROM target full outer join source on (sid = tid); +create trigger merge_skip BEFORE INSERT OR UPDATE or DELETE + ON target FOR EACH ROW EXECUTE FUNCTION skip_merge_op(); +DO $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN 
MATCHED AND s.sid = 3 THEN UPDATE SET balance = t.balance + s.delta +WHEN MATCHED THEN DELETE +WHEN NOT MATCHED THEN INSERT VALUES (sid, delta); +IF FOUND THEN + RAISE NOTICE 'Found'; +ELSE + RAISE NOTICE 'Not found'; +END IF; +GET DIAGNOSTICS result := ROW_COUNT; +RAISE NOTICE 'ROW_COUNT = %', result; +END; +$$; +SELECT * FROM target FULL OUTER JOIN source ON (sid = tid); +DROP TRIGGER merge_skip ON target; +DROP FUNCTION skip_merge_op(); +-- test from PL/pgSQL +-- make sure MERGE INTO isn't interpreted to mean returning variables like SELECT INTO +BEGIN; +DO LANGUAGE plpgsql $$ +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta; +END; +$$; +ROLLBACK; +--source constants +BEGIN; +MERGE INTO target t +USING (SELECT 9 AS sid, 57 AS delta) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +--source query +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta as newname FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.newname); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +--self-merge +BEGIN; +MERGE INTO target t1 +USING target t2 +ON t1.tid = t2.tid +WHEN MATCHED THEN + UPDATE SET balance = t1.balance + t2.balance +WHEN NOT MATCHED THEN + INSERT VALUES (t2.tid, t2.balance); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT tid as sid, balance as delta FROM target WHERE balance > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING +(SELECT sid, max(delta) AS delta + FROM source + GROUP BY sid + HAVING count(*) = 1 + ORDER BY sid ASC) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- plpgsql parameters and results +BEGIN; +CREATE FUNCTION merge_func (p_id integer, p_bal integer) +RETURNS INTEGER +LANGUAGE plpgsql +AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING (SELECT p_id AS sid) AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance - p_bal; +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(3, 4); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- PREPARE +BEGIN; +prepare foom as merge into target t using (select 1 as sid) s on (t.tid = s.sid) when matched then update set balance = 1; +execute foom; +ROLLBACK; +BEGIN; +PREPARE foom2 (integer, integer) AS +MERGE INTO target t +USING (SELECT 1) s +ON t.tid = $1 +WHEN MATCHED THEN +UPDATE SET balance = $2; +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +execute foom2 (1, 1); +ROLLBACK; +-- subqueries in source relation +BEGIN; +MERGE INTO sq_target t +USING (SELECT * FROM sq_source) s +ON tid = sid +WHEN MATCHED AND t.balance > delta THEN + UPDATE SET balance = t.balance + delta; +SELECT * FROM sq_target ORDER BY tid; +ROLLBACK; +-- try a view +CREATE VIEW v AS SELECT * FROM sq_source WHERE sid < 2; +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = 
v.balance + delta; +SELECT * FROM sq_target ORDER BY tid; +ROLLBACK; +-- ambiguous reference to a column +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +psql:include/ts_merge_query.sql:732: ERROR: column reference "balance" is ambiguous +LINE 5: UPDATE SET balance = balance + delta + ^ +ROLLBACK; +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +SELECT * FROM sq_target; +ROLLBACK; +-- CTEs +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +WITH targq AS ( + SELECT * FROM v +) +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +ROLLBACK; +-- RETURNING +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE +RETURNING *; +psql:include/ts_merge_query.sql:778: ERROR: syntax error at or near "RETURNING" +LINE 10: RETURNING *; + ^ +ROLLBACK; +-- EXPLAIN +CREATE TABLE ex_mtarget (a int, b int) + WITH (autovacuum_enabled=off); +CREATE TABLE ex_msource (a int, b int) + WITH (autovacuum_enabled=off); +INSERT INTO ex_mtarget SELECT i, i*10 FROM generate_series(1,100,2) i; +INSERT INTO ex_msource SELECT i, i*10 FROM generate_series(1,100,1) i; +CREATE FUNCTION explain_merge(query text) RETURNS SETOF text +LANGUAGE plpgsql AS +$$ +DECLARE ln text; +BEGIN + FOR ln IN + EXECUTE 'explain (analyze, timing off, summary off, costs off) ' || + query + LOOP + ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g'); + RETURN NEXT ln; + END LOOP; +END; +$$; +-- only updates +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = t.b + 1'); +-- only updates to selected tuples +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1'); +-- updates + deletes +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 10 AND t.a <= 20 THEN + DELETE'); +-- only inserts +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN NOT MATCHED AND s.a < 10 THEN + INSERT VALUES (a, b)'); +-- all three +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 30 AND t.a <= 40 THEN + DELETE +WHEN NOT MATCHED AND s.a < 20 THEN + INSERT VALUES (a, b)'); +-- nothing +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a AND t.a < -1000 +WHEN MATCHED AND t.a < 10 THEN + DO NOTHING'); +DROP TABLE ex_msource, ex_mtarget; +DROP FUNCTION explain_merge(text); +-- Subqueries +BEGIN; +MERGE INTO sq_target t 
+USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = (SELECT count(*) FROM sq_target); +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND (SELECT count(*) > 0 FROM sq_target) THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid AND (SELECT count(*) > 0 FROM sq_target) +WHEN MATCHED THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +DROP TABLE sq_target CASCADE; +DROP TABLE sq_source CASCADE; +CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 PARTITION OF pa_target FOR VALUES IN (1,4) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 PARTITION OF pa_target FOR VALUES IN (2,5,6) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 PARTITION OF pa_target FOR VALUES IN (3,8,9) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 PARTITION OF pa_target DEFAULT + WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid = 1 + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- try updating the partition key column +BEGIN; +CREATE FUNCTION merge_func() RETURNS integer LANGUAGE plpgsql AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_target CASCADE; +-- The target table is partitioned in the same way, but this time by attaching +-- partitions which have columns in different order, dropped columns etc. 
+CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 (balance float, tid integer, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 (extraid text, tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +ALTER TABLE part4 DROP COLUMN extraid; +ALTER TABLE pa_target ATTACH PARTITION part1 FOR VALUES IN (1,4); +ALTER TABLE pa_target ATTACH PARTITION part2 FOR VALUES IN (2,5,6); +ALTER TABLE pa_target ATTACH PARTITION part3 FOR VALUES IN (3,8,9); +ALTER TABLE pa_target ATTACH PARTITION part4 DEFAULT; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid IN (1, 5) + WHEN MATCHED AND tid % 5 = 0 THEN DELETE + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- try updating the partition key column +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- Sub-partitioning +CREATE TABLE pa_target (logts timestamp, tid integer, balance float, val text) + PARTITION BY RANGE (logts); +CREATE TABLE part_m01 PARTITION OF pa_target + FOR VALUES FROM ('2017-01-01') TO ('2017-02-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m01_odd PARTITION OF part_m01 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m01_even PARTITION OF part_m01 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02 PARTITION OF pa_target + FOR VALUES FROM ('2017-02-01') TO ('2017-03-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m02_odd PARTITION OF part_m02 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02_even PARTITION OF part_m02 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float) + WITH (autovacuum_enabled=off); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT '2017-01-31', id, id * 100, 'initial' FROM generate_series(1,9,3) AS id; +INSERT INTO pa_target SELECT '2017-02-28', id, id * 100, 'initial' FROM generate_series(2,9,3) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING (SELECT '2017-01-15' AS slogts, * FROM pa_source WHERE sid < 10) s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN 
+ INSERT VALUES (slogts::timestamp, sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- some complex joins on the source side +-- source relation is an unaliased join +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid1, delta, sval); +-- try accessing columns from either side of the source join +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta, sval) +WHEN MATCHED THEN + DELETE; +-- some simple expressions in INSERT targetlist +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta + scat, sval) +WHEN MATCHED THEN + UPDATE SET val = val || ' updated by merge'; +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN MATCHED THEN + UPDATE SET val = val || ' ' || delta::text; +SELECT * FROM cj_target ORDER BY tid; +ALTER TABLE cj_source1 RENAME COLUMN sid1 TO sid; +ALTER TABLE cj_source2 RENAME COLUMN sid2 TO sid; +TRUNCATE cj_target; +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON s1.sid = s2.sid +ON t.tid = s1.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s2.sid, delta, sval); +DROP TABLE cj_source2, cj_source1; +DROP TABLE cj_target CASCADE; +-- Function scans +MERGE INTO fs_target t +USING generate_series(1,100,1) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1); +MERGE INTO fs_target t +USING generate_series(1,100,2) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id, c = 'updated '|| id.*::text +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1, 'inserted ' || id.*::text); +SELECT count(*) FROM fs_target; +DROP TABLE fs_target CASCADE; +-- SERIALIZABLE test +-- handled in isolation tests +-- Inheritance-based partitioning +CREATE TABLE measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m02 ( + CHECK ( logdate >= DATE '2006-02-01' AND logdate < DATE '2006-03-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m03 ( + CHECK ( logdate >= DATE '2006-03-01' AND logdate < DATE '2006-04-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2007m01 ( + filler text, + peaktemp int, + logdate date not null, + city_id int not null, + unitsales int + CHECK ( logdate >= DATE '2007-01-01' AND logdate < DATE '2007-02-01') +) WITH (autovacuum_enabled=off); +ALTER TABLE measurement_y2007m01 DROP COLUMN filler; +ALTER TABLE measurement_y2007m01 INHERIT measurement; +INSERT INTO measurement VALUES (0, '2005-07-21', 5, 15); +CREATE OR REPLACE FUNCTION measurement_insert_trigger() +RETURNS TRIGGER AS $$ +BEGIN + IF ( NEW.logdate >= DATE '2006-02-01' AND + NEW.logdate < DATE '2006-03-01' ) THEN + INSERT INTO measurement_y2006m02 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2006-03-01' AND + NEW.logdate < DATE '2006-04-01' ) THEN + INSERT INTO measurement_y2006m03 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2007-01-01' AND + NEW.logdate < DATE '2007-02-01' ) THEN + INSERT INTO measurement_y2007m01 (city_id, logdate, peaktemp, unitsales) + VALUES (NEW.*); + ELSE + 
RAISE EXCEPTION 'Date out of range. Fix the measurement_insert_trigger() function!'; + END IF; + RETURN NULL; +END; +$$ LANGUAGE plpgsql ; +CREATE TRIGGER insert_measurement_trigger + BEFORE INSERT ON measurement + FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger(); +INSERT INTO measurement VALUES (1, '2006-02-10', 35, 10); +INSERT INTO measurement VALUES (1, '2006-02-16', 45, 20); +INSERT INTO measurement VALUES (1, '2006-03-17', 25, 10); +INSERT INTO measurement VALUES (1, '2006-03-27', 15, 40); +INSERT INTO measurement VALUES (1, '2007-01-15', 10, 10); +INSERT INTO measurement VALUES (1, '2007-01-17', 10, 10); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; +CREATE TABLE new_measurement (LIKE measurement) WITH (autovacuum_enabled=off); +INSERT INTO new_measurement VALUES (0, '2005-07-21', 25, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-01', 20, 10); +INSERT INTO new_measurement VALUES (1, '2006-02-16', 50, 10); +INSERT INTO new_measurement VALUES (2, '2006-02-10', 20, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-27', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-17', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-15', 5, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-16', 10, 10); +BEGIN; +MERGE INTO ONLY measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate, peaktemp; +ROLLBACK; +MERGE into measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; +BEGIN; +MERGE INTO new_measurement nm + USING ONLY measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; +ROLLBACK; +MERGE INTO new_measurement nm + USING measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; +DROP TABLE measurement, new_measurement CASCADE; +DROP FUNCTION measurement_insert_trigger(); +RESET SESSION AUTHORIZATION; +DROP TABLE target CASCADE; +DROP TABLE target2 CASCADE; +DROP TABLE source, source2; +DROP FUNCTION merge_trigfunc(); +REVOKE CREATE ON SCHEMA public FROM regress_merge_privs; +DROP USER regress_merge_privs; +DROP USER regress_merge_no_privs; +\o +\ir :TEST_LOAD_HT_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+CREATE USER regress_merge_privs; +CREATE USER regress_merge_no_privs; +DROP TABLE IF EXISTS target; +DROP TABLE IF EXISTS source; +CREATE TABLE target (tid integer, balance integer) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('target', 'tid', chunk_time_interval => 3); + create_hypertable +--------------------- + (1,public,target,t) +(1 row) + +CREATE TABLE source (sid integer, delta integer) -- no index + WITH (autovacuum_enabled=off); +INSERT INTO target VALUES (1, 10); +INSERT INTO target VALUES (2, 20); +INSERT INTO target VALUES (3, 30); +SELECT t.ctid is not null as matched, t.*, s.* FROM source s FULL OUTER JOIN target t ON s.sid = t.tid ORDER BY t.tid, s.sid; + matched | tid | balance | sid | delta +---------+-----+---------+-----+------- + t | 1 | 10 | | + t | 2 | 20 | | + t | 3 | 30 | | +(3 rows) + +ALTER TABLE target OWNER TO regress_merge_privs; +ALTER TABLE source OWNER TO regress_merge_privs; +CREATE TABLE target2 (tid integer, balance integer) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('target2', 'tid', chunk_time_interval => 3); + create_hypertable +---------------------- + (2,public,target2,t) +(1 row) + +CREATE TABLE source2 (sid integer, delta integer) + WITH (autovacuum_enabled=off); +ALTER TABLE target2 OWNER TO regress_merge_no_privs; +ALTER TABLE source2 OWNER TO regress_merge_no_privs; +GRANT INSERT ON target TO regress_merge_no_privs; +GRANT CREATE ON SCHEMA public TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +CREATE TABLE sq_target (tid integer NOT NULL, balance integer) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('sq_target', 'tid', chunk_time_interval => 3); + create_hypertable +------------------------ + (3,public,sq_target,t) +(1 row) + +CREATE TABLE sq_source (delta integer, sid integer, balance integer DEFAULT 0) + WITH (autovacuum_enabled=off); +INSERT INTO sq_target(tid, balance) VALUES (1,100), (2,200), (3,300); +INSERT INTO sq_source(sid, delta) VALUES (1,10), (2,20), (4,40); +-- conditional WHEN clause +CREATE TABLE wq_target (tid integer not null, balance integer DEFAULT -1) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('wq_target', 'tid', chunk_time_interval => 3); + create_hypertable +------------------------ + (4,public,wq_target,t) +(1 row) + +CREATE TABLE wq_source (balance integer, sid integer) + WITH (autovacuum_enabled=off); +INSERT INTO wq_source (sid, balance) VALUES (1, 100); +-- some complex joins on the source side +CREATE TABLE cj_target (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('cj_target', 'tid', chunk_time_interval => 3); + create_hypertable +------------------------ + (5,public,cj_target,t) +(1 row) + +CREATE TABLE cj_source1 (sid1 integer, scat integer, delta integer) + WITH (autovacuum_enabled=off); +CREATE TABLE cj_source2 (sid2 integer, sval text) + WITH (autovacuum_enabled=off); +INSERT INTO cj_source1 VALUES (1, 10, 100); +INSERT INTO cj_source1 VALUES (1, 20, 200); +INSERT INTO cj_source1 VALUES (2, 20, 300); +INSERT INTO cj_source1 VALUES (3, 10, 400); +INSERT INTO cj_source2 VALUES (1, 'initial source2'); +INSERT INTO cj_source2 VALUES (2, 'initial source2'); +INSERT INTO cj_source2 VALUES (3, 'initial source2'); +CREATE TABLE fs_target (a int, b int, c text) + WITH (autovacuum_enabled=off); +SELECT create_hypertable('fs_target', 'a', chunk_time_interval => 3); + create_hypertable +------------------------ + (6,public,fs_target,t) +(1 row) + +-- run tests on hypertable +\o 
:TEST_RESULTS_WITH_HYPERTABLE +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- +-- Errors +-- +MERGE INTO target t RANDOMWORD +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:12: ERROR: syntax error at or near "RANDOMWORD" +LINE 1: MERGE INTO target t RANDOMWORD + ^ +-- MATCHED/INSERT error +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:18: ERROR: syntax error at or near "INSERT" +LINE 5: INSERT DEFAULT VALUES; + ^ +-- incorrectly specifying INTO target +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT INTO target DEFAULT VALUES; +psql:include/ts_merge_query.sql:24: ERROR: syntax error at or near "INTO" +LINE 5: INSERT INTO target DEFAULT VALUES; + ^ +-- Multiple VALUES clause +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (1,1), (2,2); +psql:include/ts_merge_query.sql:30: ERROR: syntax error at or near "," +LINE 5: INSERT VALUES (1,1), (2,2); + ^ +-- SELECT query for INSERT +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT SELECT (1, 1); +psql:include/ts_merge_query.sql:36: ERROR: syntax error at or near "SELECT" +LINE 5: INSERT SELECT (1, 1); + ^ +-- NOT MATCHED/UPDATE +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:42: ERROR: syntax error at or near "UPDATE" +LINE 5: UPDATE SET balance = 0; + ^ +-- UPDATE tablename +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE target SET balance = 0; +psql:include/ts_merge_query.sql:48: ERROR: syntax error at or near "target" +LINE 5: UPDATE target SET balance = 0; + ^ +-- source and target names the same +MERGE INTO target +USING target +ON tid = tid +WHEN MATCHED THEN DO NOTHING; +psql:include/ts_merge_query.sql:53: ERROR: name "target" specified more than once +DETAIL: The name is used both as MERGE target table and data source. +-- used in a CTE +WITH foo AS ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) SELECT * FROM foo; +psql:include/ts_merge_query.sql:58: ERROR: MERGE not supported in WITH query +LINE 1: WITH foo AS ( + ^ +-- used in COPY +COPY ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) TO stdout; +psql:include/ts_merge_query.sql:63: ERROR: MERGE not supported in COPY +-- unsupported relation types +-- view +CREATE VIEW tv AS SELECT * FROM target; +MERGE INTO tv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:72: ERROR: cannot execute MERGE on relation "tv" +DETAIL: This operation is not supported for views. +DROP VIEW tv; +-- materialized view +CREATE MATERIALIZED VIEW mv AS SELECT * FROM target; +MERGE INTO mv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:81: ERROR: cannot execute MERGE on relation "mv" +DETAIL: This operation is not supported for materialized views. 
+DROP MATERIALIZED VIEW mv; +-- permissions +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:90: ERROR: permission denied for table source2 +GRANT INSERT ON target TO regress_merge_no_privs; +SET SESSION AUTHORIZATION regress_merge_no_privs; +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:99: ERROR: permission denied for table target +GRANT UPDATE ON target2 TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN MATCHED THEN + DELETE; +psql:include/ts_merge_query.sql:108: ERROR: permission denied for table target2 +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:114: ERROR: permission denied for table target2 +-- check if the target can be accessed from source relation subquery; we should +-- not be able to do so +MERGE INTO target t +USING (SELECT * FROM source WHERE t.tid > sid) s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +psql:include/ts_merge_query.sql:122: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 2: USING (SELECT * FROM source WHERE t.tid > sid) s + ^ +DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- +-- initial tests +-- +-- zero rows in source has no effect +MERGE INTO target +USING source +ON target.tid = source.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +ROLLBACK; +-- insert some non-matching source rows to work from +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + DO NOTHING; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (5, 50); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- index plans +INSERT INTO target SELECT generate_series(1000,2500), 0; +ALTER TABLE target ADD PRIMARY KEY (tid); +ANALYZE target; +DELETE FROM target WHERE tid > 100; +ANALYZE target; +-- insert some matching source rows to work from +INSERT INTO source VALUES (2, 5); +INSERT INTO source VALUES (3, 20); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- equivalent of a DELETE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DO NOTHING; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED 
THEN + INSERT VALUES (4, NULL); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- duplicate source row causes multiple target row update ERROR +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +psql:include/ts_merge_query.sql:241: ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +psql:include/ts_merge_query.sql:249: ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +-- remove duplicate MATCHED data from source data +DELETE FROM source WHERE sid = 2; +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- duplicate source row on INSERT should fail because of target_pkey +INSERT INTO source VALUES (4, 40); +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, NULL); +psql:include/ts_merge_query.sql:265: ERROR: duplicate key value violates unique constraint "2_2_target_pkey" +DETAIL: Key (tid)=(4) already exists. +SELECT * FROM target ORDER BY tid; +psql:include/ts_merge_query.sql:266: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- remove duplicate NOT MATCHED data from source data +DELETE FROM source WHERE sid = 4; +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; +SELECT * FROM target ORDER BY tid; +-- remove constraints +alter table target drop CONSTRAINT target_pkey; +alter table target alter column tid drop not null; +psql:include/ts_merge_query.sql:277: ERROR: cannot drop not-null constraint from a time-partitioned column +-- multiple actions +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4) +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- should be equivalent +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0 +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- column references +-- do a simple equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta; +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- and again with duplicate source rows +INSERT INTO source VALUES (5, 50); +INSERT INTO source VALUES (5, 50); +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- removing duplicate source rows +DELETE FROM source WHERE sid = 5; +-- and again with explicitly identified column list +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- and again with a 
subtle error: referring to non-existent target row for NOT MATCHED +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +psql:include/ts_merge_query.sql:356: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- and again with a constant ON clause +BEGIN; +MERGE INTO target t +USING source AS s +ON (SELECT true) +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +psql:include/ts_merge_query.sql:364: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. +SELECT * FROM target ORDER BY tid; +psql:include/ts_merge_query.sql:365: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- now the classic UPSERT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- this time with a FALSE condition +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND FALSE THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +-- this time with an actual condition which returns false +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance <> 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +BEGIN; +-- and now with a condition which returns true +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +ROLLBACK; +-- conditions in the NOT MATCHED clause can only refer to source columns +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND t.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +psql:include/ts_merge_query.sql:408: ERROR: invalid reference to FROM-clause entry for table "t" +LINE 3: WHEN NOT MATCHED AND t.balance = 100 THEN + ^ +DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. 
+SELECT * FROM wq_target; +psql:include/ts_merge_query.sql:409: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; +-- conditions in MATCHED clause can refer to both source and target +SELECT * FROM wq_source; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check if AND works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check if OR works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 199 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +-- check source-side whole-row references +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON (t.tid = s.sid) +WHEN matched and t = s or t.tid = s.sid THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +ROLLBACK; +-- check if subqueries work in the conditions? 
+MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance > (SELECT max(balance) FROM target) THEN + UPDATE SET balance = t.balance + s.balance; +-- check if we can access system columns in the conditions +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.xmin = t.xmax THEN + UPDATE SET balance = t.balance + s.balance; +psql:include/ts_merge_query.sql:477: ERROR: cannot use system column "xmin" in MERGE WHEN condition +LINE 3: WHEN MATCHED AND t.xmin = t.xmax THEN + ^ +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.tableoid >= 0 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; +DROP TABLE wq_target CASCADE; +DROP TABLE wq_source; +-- test triggers +create or replace function merge_trigfunc () returns trigger +language plpgsql as +$$ +DECLARE + line text; +BEGIN + SELECT INTO line format('%s %s %s trigger%s', + TG_WHEN, TG_OP, TG_LEVEL, CASE + WHEN TG_OP = 'INSERT' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', NEW) + WHEN TG_OP = 'UPDATE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s -> %s', OLD, NEW) + WHEN TG_OP = 'DELETE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', OLD) + END); + + RAISE NOTICE '%', line; + IF (TG_WHEN = 'BEFORE' AND TG_LEVEL = 'ROW') THEN + IF (TG_OP = 'DELETE') THEN + RETURN OLD; + ELSE + RETURN NEW; + END IF; + ELSE + RETURN NULL; + END IF; +END; +$$; +CREATE TRIGGER merge_bsi BEFORE INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsu BEFORE UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsd BEFORE DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asi AFTER INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asu AFTER UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asd AFTER DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bri BEFORE INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bru BEFORE UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_brd BEFORE DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ari AFTER INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_aru AFTER UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ard AFTER DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +-- now the classic UPSERT, with a DELETE +BEGIN; +UPDATE target SET balance = 0 WHERE tid = 3; +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta +WHEN MATCHED THEN + DELETE +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- Test behavior of triggers that turn UPDATE/DELETE into no-ops +create or replace function skip_merge_op() returns trigger +language plpgsql as +$$ +BEGIN + RETURN NULL; +END; +$$; +SELECT * FROM target full outer join source on (sid = tid); +create trigger merge_skip BEFORE INSERT OR UPDATE or DELETE + ON target FOR EACH ROW EXECUTE FUNCTION skip_merge_op(); +DO $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN 
MATCHED AND s.sid = 3 THEN UPDATE SET balance = t.balance + s.delta +WHEN MATCHED THEN DELETE +WHEN NOT MATCHED THEN INSERT VALUES (sid, delta); +IF FOUND THEN + RAISE NOTICE 'Found'; +ELSE + RAISE NOTICE 'Not found'; +END IF; +GET DIAGNOSTICS result := ROW_COUNT; +RAISE NOTICE 'ROW_COUNT = %', result; +END; +$$; +SELECT * FROM target FULL OUTER JOIN source ON (sid = tid); +DROP TRIGGER merge_skip ON target; +DROP FUNCTION skip_merge_op(); +-- test from PL/pgSQL +-- make sure MERGE INTO isn't interpreted to mean returning variables like SELECT INTO +BEGIN; +DO LANGUAGE plpgsql $$ +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta; +END; +$$; +ROLLBACK; +--source constants +BEGIN; +MERGE INTO target t +USING (SELECT 9 AS sid, 57 AS delta) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +--source query +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta as newname FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.newname); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +--self-merge +BEGIN; +MERGE INTO target t1 +USING target t2 +ON t1.tid = t2.tid +WHEN MATCHED THEN + UPDATE SET balance = t1.balance + t2.balance +WHEN NOT MATCHED THEN + INSERT VALUES (t2.tid, t2.balance); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT tid as sid, balance as delta FROM target WHERE balance > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +BEGIN; +MERGE INTO target t +USING +(SELECT sid, max(delta) AS delta + FROM source + GROUP BY sid + HAVING count(*) = 1 + ORDER BY sid ASC) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- plpgsql parameters and results +BEGIN; +CREATE FUNCTION merge_func (p_id integer, p_bal integer) +RETURNS INTEGER +LANGUAGE plpgsql +AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING (SELECT p_id AS sid) AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance - p_bal; +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(3, 4); +SELECT * FROM target ORDER BY tid; +ROLLBACK; +-- PREPARE +BEGIN; +prepare foom as merge into target t using (select 1 as sid) s on (t.tid = s.sid) when matched then update set balance = 1; +psql:include/ts_merge_query.sql:685: ERROR: prepared statement "foom" already exists +execute foom; +psql:include/ts_merge_query.sql:686: ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +BEGIN; +PREPARE foom2 (integer, integer) AS +MERGE INTO target t +USING (SELECT 1) s +ON t.tid = $1 +WHEN MATCHED THEN +UPDATE SET balance = $2; +psql:include/ts_merge_query.sql:695: ERROR: prepared statement "foom2" already exists +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +execute foom2 (1, 1); +psql:include/ts_merge_query.sql:697: ERROR: current transaction is aborted, commands ignored until end of transaction block 
+ROLLBACK; +-- subqueries in source relation +BEGIN; +MERGE INTO sq_target t +USING (SELECT * FROM sq_source) s +ON tid = sid +WHEN MATCHED AND t.balance > delta THEN + UPDATE SET balance = t.balance + delta; +SELECT * FROM sq_target ORDER BY tid; +ROLLBACK; +-- try a view +CREATE VIEW v AS SELECT * FROM sq_source WHERE sid < 2; +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = v.balance + delta; +SELECT * FROM sq_target ORDER BY tid; +ROLLBACK; +-- ambiguous reference to a column +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +psql:include/ts_merge_query.sql:732: ERROR: column reference "balance" is ambiguous +LINE 5: UPDATE SET balance = balance + delta + ^ +ROLLBACK; +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +SELECT * FROM sq_target; +ROLLBACK; +-- CTEs +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +WITH targq AS ( + SELECT * FROM v +) +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +ROLLBACK; +-- RETURNING +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE +RETURNING *; +psql:include/ts_merge_query.sql:778: ERROR: syntax error at or near "RETURNING" +LINE 10: RETURNING *; + ^ +ROLLBACK; +-- EXPLAIN +CREATE TABLE ex_mtarget (a int, b int) + WITH (autovacuum_enabled=off); +CREATE TABLE ex_msource (a int, b int) + WITH (autovacuum_enabled=off); +INSERT INTO ex_mtarget SELECT i, i*10 FROM generate_series(1,100,2) i; +INSERT INTO ex_msource SELECT i, i*10 FROM generate_series(1,100,1) i; +CREATE FUNCTION explain_merge(query text) RETURNS SETOF text +LANGUAGE plpgsql AS +$$ +DECLARE ln text; +BEGIN + FOR ln IN + EXECUTE 'explain (analyze, timing off, summary off, costs off) ' || + query + LOOP + ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g'); + RETURN NEXT ln; + END LOOP; +END; +$$; +-- only updates +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = t.b + 1'); +-- only updates to selected tuples +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1'); +-- updates + deletes +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 10 AND t.a <= 20 THEN + DELETE'); +-- only inserts +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN NOT MATCHED AND s.a < 10 THEN + INSERT VALUES (a, b)'); +-- all three +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 
10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 30 AND t.a <= 40 THEN + DELETE +WHEN NOT MATCHED AND s.a < 20 THEN + INSERT VALUES (a, b)'); +-- nothing +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a AND t.a < -1000 +WHEN MATCHED AND t.a < 10 THEN + DO NOTHING'); +DROP TABLE ex_msource, ex_mtarget; +DROP FUNCTION explain_merge(text); +-- Subqueries +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = (SELECT count(*) FROM sq_target); +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND (SELECT count(*) > 0 FROM sq_target) THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid AND (SELECT count(*) > 0 FROM sq_target) +WHEN MATCHED THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; +ROLLBACK; +DROP TABLE sq_target CASCADE; +DROP TABLE sq_source CASCADE; +CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 PARTITION OF pa_target FOR VALUES IN (1,4) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 PARTITION OF pa_target FOR VALUES IN (2,5,6) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 PARTITION OF pa_target FOR VALUES IN (3,8,9) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 PARTITION OF pa_target DEFAULT + WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid = 1 + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- try updating the partition key column +BEGIN; +CREATE FUNCTION merge_func() RETURNS integer LANGUAGE plpgsql AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_target CASCADE; +-- The target table is partitioned in the same way, but this time by attaching +-- partitions which have columns in different order, dropped columns etc. 
+CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 (balance float, tid integer, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 (extraid text, tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +ALTER TABLE part4 DROP COLUMN extraid; +ALTER TABLE pa_target ATTACH PARTITION part1 FOR VALUES IN (1,4); +ALTER TABLE pa_target ATTACH PARTITION part2 FOR VALUES IN (2,5,6); +ALTER TABLE pa_target ATTACH PARTITION part3 FOR VALUES IN (3,8,9); +ALTER TABLE pa_target ATTACH PARTITION part4 DEFAULT; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid IN (1, 5) + WHEN MATCHED AND tid % 5 = 0 THEN DELETE + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +-- try updating the partition key column +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- Sub-partitioning +CREATE TABLE pa_target (logts timestamp, tid integer, balance float, val text) + PARTITION BY RANGE (logts); +CREATE TABLE part_m01 PARTITION OF pa_target + FOR VALUES FROM ('2017-01-01') TO ('2017-02-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m01_odd PARTITION OF part_m01 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m01_even PARTITION OF part_m01 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02 PARTITION OF pa_target + FOR VALUES FROM ('2017-02-01') TO ('2017-03-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m02_odd PARTITION OF part_m02 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02_even PARTITION OF part_m02 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float) + WITH (autovacuum_enabled=off); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT '2017-01-31', id, id * 100, 'initial' FROM generate_series(1,9,3) AS id; +INSERT INTO pa_target SELECT '2017-02-28', id, id * 100, 'initial' FROM generate_series(2,9,3) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING (SELECT '2017-01-15' AS slogts, * FROM pa_source WHERE sid < 10) s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN 
+ INSERT VALUES (slogts::timestamp, sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- some complex joins on the source side +-- source relation is an unaliased join +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid1, delta, sval); +-- try accessing columns from either side of the source join +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta, sval) +WHEN MATCHED THEN + DELETE; +-- some simple expressions in INSERT targetlist +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta + scat, sval) +WHEN MATCHED THEN + UPDATE SET val = val || ' updated by merge'; +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN MATCHED THEN + UPDATE SET val = val || ' ' || delta::text; +SELECT * FROM cj_target ORDER BY tid; +ALTER TABLE cj_source1 RENAME COLUMN sid1 TO sid; +ALTER TABLE cj_source2 RENAME COLUMN sid2 TO sid; +TRUNCATE cj_target; +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON s1.sid = s2.sid +ON t.tid = s1.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s2.sid, delta, sval); +DROP TABLE cj_source2, cj_source1; +DROP TABLE cj_target CASCADE; +-- Function scans +MERGE INTO fs_target t +USING generate_series(1,100,1) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1); +MERGE INTO fs_target t +USING generate_series(1,100,2) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id, c = 'updated '|| id.*::text +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1, 'inserted ' || id.*::text); +SELECT count(*) FROM fs_target; +DROP TABLE fs_target CASCADE; +-- SERIALIZABLE test +-- handled in isolation tests +-- Inheritance-based partitioning +CREATE TABLE measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m02 ( + CHECK ( logdate >= DATE '2006-02-01' AND logdate < DATE '2006-03-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m03 ( + CHECK ( logdate >= DATE '2006-03-01' AND logdate < DATE '2006-04-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2007m01 ( + filler text, + peaktemp int, + logdate date not null, + city_id int not null, + unitsales int + CHECK ( logdate >= DATE '2007-01-01' AND logdate < DATE '2007-02-01') +) WITH (autovacuum_enabled=off); +ALTER TABLE measurement_y2007m01 DROP COLUMN filler; +ALTER TABLE measurement_y2007m01 INHERIT measurement; +INSERT INTO measurement VALUES (0, '2005-07-21', 5, 15); +CREATE OR REPLACE FUNCTION measurement_insert_trigger() +RETURNS TRIGGER AS $$ +BEGIN + IF ( NEW.logdate >= DATE '2006-02-01' AND + NEW.logdate < DATE '2006-03-01' ) THEN + INSERT INTO measurement_y2006m02 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2006-03-01' AND + NEW.logdate < DATE '2006-04-01' ) THEN + INSERT INTO measurement_y2006m03 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2007-01-01' AND + NEW.logdate < DATE '2007-02-01' ) THEN + INSERT INTO measurement_y2007m01 (city_id, logdate, peaktemp, unitsales) + VALUES (NEW.*); + ELSE + 
RAISE EXCEPTION 'Date out of range. Fix the measurement_insert_trigger() function!'; + END IF; + RETURN NULL; +END; +$$ LANGUAGE plpgsql ; +CREATE TRIGGER insert_measurement_trigger + BEFORE INSERT ON measurement + FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger(); +INSERT INTO measurement VALUES (1, '2006-02-10', 35, 10); +INSERT INTO measurement VALUES (1, '2006-02-16', 45, 20); +INSERT INTO measurement VALUES (1, '2006-03-17', 25, 10); +INSERT INTO measurement VALUES (1, '2006-03-27', 15, 40); +INSERT INTO measurement VALUES (1, '2007-01-15', 10, 10); +INSERT INTO measurement VALUES (1, '2007-01-17', 10, 10); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; +CREATE TABLE new_measurement (LIKE measurement) WITH (autovacuum_enabled=off); +INSERT INTO new_measurement VALUES (0, '2005-07-21', 25, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-01', 20, 10); +INSERT INTO new_measurement VALUES (1, '2006-02-16', 50, 10); +INSERT INTO new_measurement VALUES (2, '2006-02-10', 20, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-27', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-17', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-15', 5, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-16', 10, 10); +BEGIN; +MERGE INTO ONLY measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate, peaktemp; +ROLLBACK; +MERGE into measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; +BEGIN; +MERGE INTO new_measurement nm + USING ONLY measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; +ROLLBACK; +MERGE INTO new_measurement nm + USING measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; +DROP TABLE measurement, new_measurement CASCADE; +DROP FUNCTION measurement_insert_trigger(); +RESET SESSION AUTHORIZATION; +DROP TABLE target CASCADE; +DROP TABLE target2 CASCADE; +DROP TABLE source, source2; +DROP FUNCTION merge_trigfunc(); +REVOKE CREATE ON SCHEMA public FROM regress_merge_privs; +DROP USER regress_merge_privs; +DROP USER regress_merge_no_privs; +\o +:DIFF_CMD diff --git a/test/sql/.gitignore b/test/sql/.gitignore index 5a362a06cdd..431ff1e5d3c 100644 --- a/test/sql/.gitignore +++ b/test/sql/.gitignore @@ -4,14 +4,19 @@ /cursor-*.sql /ddl-*.sql /delete-*.sql +/histogram_test-*.sql +/insert-*.sql /insert_many-*.sql /parallel-*.sql /partitioning-*.sql +/plan_expand_hypertable-*.sql /plan_hashagg-*.sql /plan_hashagg_optimized-*.sql /plan_hypertable_cache-*.sql /plan_ordered_append-*.sql /query-*.sql 
/rowsecurity-*.sql
+/timestamp-*.sql
+/ts_merge-*.sql
/update-*.sql
/loader-*.sql
diff --git a/test/sql/CMakeLists.txt b/test/sql/CMakeLists.txt
index 977ab333fe6..928f3d41977 100644
--- a/test/sql/CMakeLists.txt
+++ b/test/sql/CMakeLists.txt
@@ -26,10 +26,8 @@ set(TEST_FILES
generated_as_identity.sql
grant_hypertable.sql
hash.sql
- histogram_test.sql
index.sql
information_views.sql
- insert.sql
insert_many.sql
insert_single.sql
insert_returning.sql
@@ -38,12 +36,10 @@ set(TEST_FILES
partition.sql
partitioning.sql
partitionwise.sql
- plan_expand_hypertable.sql
pg_dump_unprivileged.sql
pg_join.sql
plain.sql
plan_hypertable_inline.sql
- plan_ordered_append.sql
relocate_extension.sql
reloptions.sql
size_utils.sql
@@ -51,7 +47,6 @@ set(TEST_FILES
sql_query.sql
tableam.sql
tablespace.sql
- timestamp.sql
triggers.sql
truncate.sql
upsert.sql
@@ -66,11 +61,16 @@ set(TEST_TEMPLATES
cursor.sql.in
ddl.sql.in
delete.sql.in
+ histogram_test.sql.in
+ insert.sql.in
plan_hashagg.sql.in
rowsecurity.sql.in
update.sql.in
parallel.sql.in
- query.sql.in)
+ plan_expand_hypertable.sql.in
+ plan_ordered_append.sql.in
+ query.sql.in
+ timestamp.sql.in)

# Loader test must distinguish between Apache and TSL builds so we parametrize
# this here
@@ -122,7 +122,8 @@ if((${PG_VERSION_MAJOR} GREATER_EQUAL "14"))
endif()

if((${PG_VERSION_MAJOR} GREATER_EQUAL "15"))
- list(APPEND TEST_FILES merge.sql ts_merge.sql)
+ list(APPEND TEST_FILES merge.sql)
+ list(APPEND TEST_TEMPLATES ts_merge.sql.in)
endif()

# only test custom type if we are in 64-bit architecture
diff --git a/test/sql/histogram_test.sql b/test/sql/histogram_test.sql.in
similarity index 97%
rename from test/sql/histogram_test.sql
rename to test/sql/histogram_test.sql.in
index 0f9aadc78b9..ec416f9eb6d 100644
--- a/test/sql/histogram_test.sql
+++ b/test/sql/histogram_test.sql.in
@@ -72,7 +72,7 @@ INSERT INTO weather VALUES
('2023-03-23 06:12:02.73765+00 ','city1', 9.7),
('2023-03-23 06:12:06.990998+00','city1',11.7);
--- This will currently generate an error.
+-- This will currently generate an error on PG15 and prior versions
\set ON_ERROR_STOP 0
SELECT histogram(temperature, -1.79769e+308, 1.79769e+308,10) FROM weather GROUP BY city;
\set ON_ERROR_STOP 1
diff --git a/test/sql/insert.sql b/test/sql/insert.sql.in
similarity index 100%
rename from test/sql/insert.sql
rename to test/sql/insert.sql.in
diff --git a/test/sql/plan_expand_hypertable.sql b/test/sql/plan_expand_hypertable.sql.in
similarity index 100%
rename from test/sql/plan_expand_hypertable.sql
rename to test/sql/plan_expand_hypertable.sql.in
diff --git a/test/sql/plan_ordered_append.sql b/test/sql/plan_ordered_append.sql.in
similarity index 100%
rename from test/sql/plan_ordered_append.sql
rename to test/sql/plan_ordered_append.sql.in
diff --git a/test/sql/timestamp.sql b/test/sql/timestamp.sql.in
similarity index 100%
rename from test/sql/timestamp.sql
rename to test/sql/timestamp.sql.in
diff --git a/test/sql/ts_merge.sql b/test/sql/ts_merge.sql.in
similarity index 100%
rename from test/sql/ts_merge.sql
rename to test/sql/ts_merge.sql.in
diff --git a/tsl/src/data_node.c b/tsl/src/data_node.c
index 45349590b77..971b4a5f151 100644
--- a/tsl/src/data_node.c
+++ b/tsl/src/data_node.c
@@ -857,18 +857,52 @@ data_node_add_internal(PG_FUNCTION_ARGS, bool set_distid)
Datum
data_node_add(PG_FUNCTION_ARGS)
{
+#if PG16_GE
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("adding data node is not supported"),
+ errdetail("Multi-node is not supported anymore on PostgreSQL >= 16.")));
+#else
+ ereport(WARNING,
+ (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE),
+ errmsg("adding data node is deprecated"),
+ errdetail("Multi-node is deprecated and will be removed in future releases.")));
+#endif
return data_node_add_internal(fcinfo, true);
}

Datum
data_node_add_without_dist_id(PG_FUNCTION_ARGS)
{
+#if PG16_GE
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("adding data node is not supported"),
+ errdetail("Multi-node is not supported anymore on PostgreSQL >= 16.")));
+#else
+ ereport(WARNING,
+ (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE),
+ errmsg("adding data node is deprecated"),
+ errdetail("Multi-node is deprecated and will be removed in future releases.")));
+#endif
return data_node_add_internal(fcinfo, false);
}

Datum
data_node_attach(PG_FUNCTION_ARGS)
{
+#if PG16_GE
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("attaching data node is not supported"),
+ errdetail("Multi-node is not supported anymore on PostgreSQL >= 16.")));
+#else
+ ereport(WARNING,
+ (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE),
+ errmsg("attaching data node is deprecated"),
+ errdetail("Multi-node is deprecated and will be removed in future releases.")));
+#endif
+
const char *node_name = PG_ARGISNULL(0) ? NULL : PG_GETARG_CSTRING(0);
Oid table_id = PG_GETARG_OID(1);
bool if_not_attached = PG_ARGISNULL(2) ? false : PG_GETARG_BOOL(2);
@@ -1404,6 +1438,18 @@ data_node_block_new_chunks(PG_FUNCTION_ARGS)
Datum
data_node_detach(PG_FUNCTION_ARGS)
{
+#if PG16_GE
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("detaching data node is not supported"),
+ errdetail("Multi-node is not supported anymore on PostgreSQL >= 16.")));
+#else
+ ereport(WARNING,
+ (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE),
+ errmsg("detaching data node is deprecated"),
+ errdetail("Multi-node is deprecated and will be removed in future releases.")));
+#endif
+
const char *node_name = PG_ARGISNULL(0) ? NULL : NameStr(*PG_GETARG_NAME(0));
Oid table_id = PG_ARGISNULL(1) ? InvalidOid : PG_GETARG_OID(1);
bool all_hypertables = PG_ARGISNULL(1);
@@ -1601,6 +1647,18 @@ append_data_node_option(List *new_options, List **current_options, const char *n
Datum
data_node_alter(PG_FUNCTION_ARGS)
{
+#if PG16_GE
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("altering data node is not supported"),
+ errdetail("Multi-node is not supported anymore on PostgreSQL >= 16.")));
+#else
+ ereport(WARNING,
+ (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE),
+ errmsg("altering data node is deprecated"),
+ errdetail("Multi-node is deprecated and will be removed in future releases.")));
+#endif
+
const char *node_name = PG_ARGISNULL(0) ? NULL : NameStr(*PG_GETARG_NAME(0));
const char *host = PG_ARGISNULL(1) ? NULL : TextDatumGetCString(PG_GETARG_DATUM(1));
const char *database = PG_ARGISNULL(2) ? NULL : NameStr(*PG_GETARG_NAME(2));
@@ -1826,6 +1884,18 @@ drop_data_node_database(const ForeignServer *server)
Datum
data_node_delete(PG_FUNCTION_ARGS)
{
+#if PG16_GE
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("deleting data node is not supported"),
+ errdetail("Multi-node is not supported anymore on PostgreSQL >= 16.")));
+#else
+ ereport(WARNING,
+ (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE),
+ errmsg("deleting data node is deprecated"),
+ errdetail("Multi-node is deprecated and will be removed in future releases.")));
+#endif
+
const char *node_name = PG_ARGISNULL(0) ? NULL : PG_GETARG_CSTRING(0);
bool if_exists = PG_ARGISNULL(1) ? false : PG_GETARG_BOOL(1);
bool force = PG_ARGISNULL(2) ? false : PG_GETARG_BOOL(2);
diff --git a/tsl/src/dist_backup.c b/tsl/src/dist_backup.c
index 7d90d42db3b..d9e32262879 100644
--- a/tsl/src/dist_backup.c
+++ b/tsl/src/dist_backup.c
@@ -65,6 +65,18 @@ create_restore_point_datum(TupleDesc tupdesc, const char *node_name, XLogRecPtr
Datum
create_distributed_restore_point(PG_FUNCTION_ARGS)
{
+#if PG16_GE
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("creating distributed restore point is not supported"),
+ errdetail("Multi-node is not supported anymore on PostgreSQL >= 16.")));
+#else
+ ereport(WARNING,
+ (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE),
+ errmsg("creating distributed restore point is deprecated"),
+ errdetail("Multi-node is deprecated and will be removed in future releases.")));
+#endif
+
const char *name = TextDatumGetCString(PG_GETARG_DATUM(0));
DistCmdResult *result_cmd;
FuncCallContext *funcctx;
diff --git a/tsl/src/remote/dist_commands.c b/tsl/src/remote/dist_commands.c
index 6f335f0587b..92e3efd0fc6 100644
--- a/tsl/src/remote/dist_commands.c
+++ b/tsl/src/remote/dist_commands.c
@@ -523,6 +523,13 @@ ts_dist_cmd_close_prepared_command(PreparedDistCmd *command)
Datum
ts_dist_cmd_exec(PG_FUNCTION_ARGS)
{
+#if PG16_GE
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("distributed command execution is not supported"),
+ errdetail("Multi-node is not supported anymore on PostgreSQL >= 16.")));
+#endif
+
const char *query = PG_ARGISNULL(0) ? NULL : TextDatumGetCString(PG_GETARG_DATUM(0));
ArrayType *data_nodes = PG_ARGISNULL(1) ? NULL : PG_GETARG_ARRAYTYPE_P(1);
bool transactional = PG_ARGISNULL(2) ?
true : PG_GETARG_BOOL(2); diff --git a/tsl/test/expected/cagg_bgw-16.out b/tsl/test/expected/cagg_bgw-16.out index 63f8f8cc7bb..f2976f5a568 100644 --- a/tsl/test/expected/cagg_bgw-16.out +++ b/tsl/test/expected/cagg_bgw-16.out @@ -159,15 +159,14 @@ SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); (1 row) SELECT * FROM sorted_bgw_log; - msg_no | mock_time | application_name | msg ---------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+------------------------------------------------------------------------------------------------- 0 | 0 | DB Scheduler | [TESTING] Registered new background worker 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) 0 | 0 | Refresh Continuous Aggregate Policy [1000] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -2147483648, 6 ] 1 | 0 | Refresh Continuous Aggregate Policy [1000] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_2" 2 | 0 | Refresh Continuous Aggregate Policy [1000] | inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_2" - 3 | 0 | Refresh Continuous Aggregate Policy [1000] | job 1000 (Refresh Continuous Aggregate Policy [1000]) exiting with success: execution time (RANDOM) ms -(6 rows) +(5 rows) SELECT * FROM _timescaledb_config.bgw_job where id=:job_id; id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone @@ -395,15 +394,14 @@ SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); (1 row) SELECT * FROM sorted_bgw_log; - msg_no | mock_time | application_name | msg ---------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+------------------------------------------------------------------------------------------------- 0 | 0 | DB Scheduler | [TESTING] Registered new background worker 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" - 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms -(6 rows) +(5 rows) -- job ran once, successfully SELECT job_id, last_finish - next_start as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes @@ -443,21 +441,19 @@ SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); (1 row) SELECT * FROM sorted_bgw_log; - msg_no | mock_time | application_name | msg 
---------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+------------------------------------------------------------------------------------------------- 0 | 0 | DB Scheduler | [TESTING] Registered new background worker 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" - 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) 0 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] 1 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | deleted 1 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" 2 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" - 3 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms -(12 rows) +(10 rows) SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes FROM _timescaledb_internal.bgw_job_stat @@ -677,19 +673,18 @@ SELECT * FROM test_continuous_agg_view_user_2; \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER SELECT * from sorted_bgw_log; - msg_no | mock_time | application_name | msg ---------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+------------------------------------------------------------------------------------------------- 0 | 0 | DB Scheduler | [TESTING] Registered new background worker 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) 0 | 0 | Refresh Continuous Aggregate Policy [1003] | refreshing continuous aggregate "test_continuous_agg_view_user_2" in window [ -2147483648, 2 ] 1 | 0 | Refresh Continuous Aggregate Policy [1003] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_6" 2 | 0 | Refresh Continuous Aggregate Policy [1003] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_6" - 3 | 0 | Refresh Continuous Aggregate Policy [1003] | job 1003 (Refresh Continuous Aggregate Policy [1003]) exiting with success: execution time (RANDOM) ms 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), 
started at (RANDOM) 0 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | job 1003 threw an error 1 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | permission denied for table test_continuous_agg_table_w_grant -(10 rows) +(9 rows) -- Count the number of continuous aggregate policies SELECT count(*) FROM _timescaledb_config.bgw_job diff --git a/tsl/test/expected/cagg_bgw_dist_ht.out b/tsl/test/expected/cagg_bgw_dist_ht.out index e7ca0d9ee6c..5b85e7de721 100644 --- a/tsl/test/expected/cagg_bgw_dist_ht.out +++ b/tsl/test/expected/cagg_bgw_dist_ht.out @@ -28,6 +28,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------------+-----------------------+--------------+------------------+------------------- db_cagg_bgw_dist_ht_1 | db_cagg_bgw_dist_ht_1 | t | t | t @@ -120,6 +123,7 @@ psql:include/cagg_bgw_common.sql:76: WARNING: no privileges were granted for "p CREATE TABLE test_continuous_agg_table(time int, data int); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_bgw_common.sql:80: WARNING: distributed hypertable is deprecated psql:include/cagg_bgw_common.sql:80: NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ---------------------------------------- @@ -196,15 +200,14 @@ SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); (1 row) SELECT * FROM sorted_bgw_log; - msg_no | mock_time | application_name | msg ---------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+------------------------------------------------------------------------------------------------- 0 | 0 | DB Scheduler | [TESTING] Registered new background worker 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) 0 | 0 | Refresh Continuous Aggregate Policy [1000] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -2147483648, 6 ] 1 | 0 | Refresh Continuous Aggregate Policy [1000] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_2" 2 | 0 | Refresh Continuous Aggregate Policy [1000] | inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_2" - 3 | 0 | Refresh Continuous Aggregate Policy [1000] | job 1000 (Refresh Continuous Aggregate Policy [1000]) exiting with success: execution time (RANDOM) ms -(6 rows) +(5 rows) SELECT * FROM _timescaledb_config.bgw_job where id=:job_id; id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone @@ -432,15 +435,14 @@ SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25); (1 row) SELECT * FROM sorted_bgw_log; - msg_no | mock_time | application_name | msg 
---------+-----------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + msg_no | mock_time | application_name | msg +--------+-----------+--------------------------------------------+------------------------------------------------------------------------------------------------- 0 | 0 | DB Scheduler | [TESTING] Registered new background worker 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" - 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms -(6 rows) +(5 rows) -- job ran once, successfully SELECT job_id, last_finish - next_start as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes @@ -480,21 +482,19 @@ SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(25, 25); (1 row) SELECT * FROM sorted_bgw_log; - msg_no | mock_time | application_name | msg ---------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+------------------------------------------------------------------------------------------------- 0 | 0 | DB Scheduler | [TESTING] Registered new background worker 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) 0 | 0 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] 1 | 0 | Refresh Continuous Aggregate Policy [1001] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" 2 | 0 | Refresh Continuous Aggregate Policy [1001] | inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" - 3 | 0 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) 0 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | refreshing continuous aggregate "test_continuous_agg_view" in window [ -90, 12 ] 1 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | deleted 1 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_3" 2 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_3" - 3 | 43200000000 | Refresh Continuous Aggregate Policy [1001] | job 1001 (Refresh Continuous Aggregate Policy [1001]) exiting with success: execution time (RANDOM) ms -(12 rows) +(10 rows) SELECT job_id, next_start - last_finish as until_next, last_run_success, total_runs, total_successes, total_failures, total_crashes FROM 
_timescaledb_internal.bgw_job_stat @@ -610,6 +610,7 @@ SELECT ts_bgw_params_reset_time(); CREATE TABLE test_continuous_agg_table_w_grant(time int, data int); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('test_continuous_agg_table_w_grant', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_bgw_common.sql:330: WARNING: distributed hypertable is deprecated psql:include/cagg_bgw_common.sql:330: NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------------------------ @@ -714,19 +715,18 @@ SELECT * FROM test_continuous_agg_view_user_2; \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER SELECT * from sorted_bgw_log; - msg_no | mock_time | application_name | msg ---------+-------------+--------------------------------------------+-------------------------------------------------------------------------------------------------------- + msg_no | mock_time | application_name | msg +--------+-------------+--------------------------------------------+------------------------------------------------------------------------------------------------- 0 | 0 | DB Scheduler | [TESTING] Registered new background worker 1 | 0 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) 0 | 0 | Refresh Continuous Aggregate Policy [1003] | refreshing continuous aggregate "test_continuous_agg_view_user_2" in window [ -2147483648, 2 ] 1 | 0 | Refresh Continuous Aggregate Policy [1003] | deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_6" 2 | 0 | Refresh Continuous Aggregate Policy [1003] | inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_6" - 3 | 0 | Refresh Continuous Aggregate Policy [1003] | job 1003 (Refresh Continuous Aggregate Policy [1003]) exiting with success: execution time (RANDOM) ms 0 | 43200000000 | DB Scheduler | [TESTING] Registered new background worker 1 | 43200000000 | DB Scheduler | [TESTING] Wait until (RANDOM), started at (RANDOM) 0 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | job 1003 threw an error 1 | 43200000000 | Refresh Continuous Aggregate Policy [1003] | permission denied for table test_continuous_agg_table_w_grant -(10 rows) +(9 rows) -- Count the number of continuous aggregate policies SELECT count(*) FROM _timescaledb_config.bgw_job diff --git a/tsl/test/expected/cagg_ddl_dist_ht-13.out b/tsl/test/expected/cagg_ddl_dist_ht-13.out deleted file mode 100644 index 9f782583746..00000000000 --- a/tsl/test/expected/cagg_ddl_dist_ht-13.out +++ /dev/null @@ -1,2207 +0,0 @@ --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. ------------------------------------- --- Set up a distributed environment ------------------------------------- -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -\set DATA_NODE_3 :TEST_DBNAME _3 -\ir include/remote_exec.sql --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. 
-CREATE SCHEMA IF NOT EXISTS test; -psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping -GRANT USAGE ON SCHEMA test TO PUBLIC; -CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) -RETURNS VOID -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' -LANGUAGE C; -CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) -RETURNS TABLE("table_record" CSTRING[]) -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' -LANGUAGE C; -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) -) a; - node_name | database | node_created | database_created | extension_created ------------------------+-----------------------+--------------+------------------+------------------- - db_cagg_ddl_dist_ht_1 | db_cagg_ddl_dist_ht_1 | t | t | t - db_cagg_ddl_dist_ht_2 | db_cagg_ddl_dist_ht_2 | t | t | t - db_cagg_ddl_dist_ht_3 | db_cagg_ddl_dist_ht_3 | t | t | t -(3 rows) - -GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; --- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes -GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; -\set IS_DISTRIBUTED TRUE -\ir include/cagg_ddl_common.sql --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. --- Set this variable to avoid using a hard-coded path each time query --- results are compared -\set QUERY_RESULT_TEST_EQUAL_RELPATH '../../../../test/sql/include/query_result_test_equal.sql' -\if :IS_DISTRIBUTED -\echo 'Running distributed hypertable tests' -Running distributed hypertable tests -\else -\echo 'Running local hypertable tests' -\endif -SET ROLE :ROLE_DEFAULT_PERM_USER; ---DDL commands on continuous aggregates -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature integer NULL, - humidity DOUBLE PRECISION NULL, - timemeasure TIMESTAMPTZ, - timeinterval INTERVAL -); -\if :IS_DISTRIBUTED -SELECT table_name FROM create_distributed_hypertable('conditions', 'timec', replication_factor => 2); - table_name ------------- - conditions -(1 row) - -\else -SELECT table_name FROM create_hypertable('conditions', 'timec'); -\endif --- schema tests -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; -CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; -CREATE SCHEMA rename_schema; -GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; -SET ROLE :ROLE_DEFAULT_PERM_USER; -CREATE TABLE foo(time TIMESTAMPTZ NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('foo', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (2,public,foo,t) -(1 row) - -\else -SELECT create_hypertable('foo', 'time'); -\endif -CREATE MATERIALIZED VIEW rename_test - WITH ( timescaledb.continuous, timescaledb.materialized_only=true) -AS SELECT time_bucket('1week', time), COUNT(data) - FROM foo - GROUP BY 1 WITH NO DATA; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | 
partial_view_schema | partial_view_name -------------------+----------------+-----------------------+------------------- - public | rename_test | _timescaledb_internal | _partial_view_3 -(1 row) - -ALTER MATERIALIZED VIEW rename_test SET SCHEMA rename_schema; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+-----------------------+------------------- - rename_schema | rename_test | _timescaledb_internal | _partial_view_3 -(1 row) - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA", - direct_view_name as "DIR_VIEW_NAME", - direct_view_schema as "DIR_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'rename_test' -\gset -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME" SET SCHEMA public; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+---------------------+------------------- - rename_schema | rename_test | public | _partial_view_3 -(1 row) - ---alter direct view schema -SELECT user_view_schema, user_view_name, direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | direct_view_schema | direct_view_name -------------------+----------------+-----------------------+------------------ - rename_schema | rename_test | _timescaledb_internal | _direct_view_3 -(1 row) - -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME" SET SCHEMA public; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name -------------------+----------------+---------------------+-------------------+--------------------+------------------ - rename_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 -(1 row) - -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER SCHEMA rename_schema RENAME TO new_name_schema; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name -------------------+----------------+---------------------+-------------------+--------------------+------------------ - new_name_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 -(1 row) - -ALTER VIEW :"PART_VIEW_NAME" SET SCHEMA new_name_schema; -ALTER VIEW :"DIR_VIEW_NAME" SET SCHEMA new_name_schema; -SELECT 
user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name -------------------+----------------+---------------------+-------------------+--------------------+------------------ - new_name_schema | rename_test | new_name_schema | _partial_view_3 | new_name_schema | _direct_view_3 -(1 row) - -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER SCHEMA new_name_schema RENAME TO foo_name_schema; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+---------------------+------------------- - foo_name_schema | rename_test | foo_name_schema | _partial_view_3 -(1 row) - -ALTER MATERIALIZED VIEW foo_name_schema.rename_test SET SCHEMA public; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+---------------------+------------------- - public | rename_test | foo_name_schema | _partial_view_3 -(1 row) - -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER SCHEMA foo_name_schema RENAME TO rename_schema; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SET client_min_messages TO NOTICE; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+---------------------+------------------- - public | rename_test | rename_schema | _partial_view_3 -(1 row) - -ALTER MATERIALIZED VIEW rename_test RENAME TO rename_c_aggregate; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+--------------------+---------------------+------------------- - public | rename_c_aggregate | rename_schema | _partial_view_3 -(1 row) - -SELECT * FROM rename_c_aggregate; - time_bucket | count --------------+------- -(0 rows) - -ALTER VIEW rename_schema.:"PART_VIEW_NAME" RENAME TO partial_view; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name -------------------+--------------------+---------------------+-------------------+--------------------+------------------ - public | rename_c_aggregate | rename_schema | partial_view | rename_schema | _direct_view_3 -(1 row) - ---rename direct view -ALTER VIEW rename_schema.:"DIR_VIEW_NAME" RENAME TO direct_view; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name 
-------------------+--------------------+---------------------+-------------------+--------------------+------------------ - public | rename_c_aggregate | rename_schema | partial_view | rename_schema | direct_view -(1 row) - --- drop_chunks tests -DROP TABLE conditions CASCADE; -DROP TABLE foo CASCADE; -psql:include/cagg_ddl_common.sql:161: NOTICE: drop cascades to 2 other objects -CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT hypertable_id AS drop_chunks_table_id - FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset -\else -SELECT hypertable_id AS drop_chunks_table_id - FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset -\endif -CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; -$DIST$); -\endif -SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test'); - set_integer_now_func ----------------------- - -(1 row) - -CREATE MATERIALIZED VIEW drop_chunks_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('5', time), COUNT(data) - FROM drop_chunks_table - GROUP BY 1 WITH NO DATA; -SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table, - schema_name AS drop_chunks_mat_schema, - table_name AS drop_chunks_mat_table_name - FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg - WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id - AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset --- create 3 chunks, with 3 time bucket -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; --- Only refresh up to bucket 15 initially. Matches the old refresh --- behavior that didn't materialize everything -CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); -SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; - count -------- - 3 -(1 row) - -SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; - count -------- - 1 -(1 row) - -SELECT * FROM drop_chunks_view ORDER BY 1; - time_bucket | count --------------+------- - 0 | 5 - 5 | 5 - 10 | 5 -(3 rows) - --- cannot drop directly from the materialization table without specifying --- cont. 
aggregate view name explicitly -\set ON_ERROR_STOP 0 -SELECT drop_chunks(:'drop_chunks_mat_table', - newer_than => -20, - verbose => true); -psql:include/cagg_ddl_common.sql:213: ERROR: operation not supported on materialized hypertable -\set ON_ERROR_STOP 1 -SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; - count -------- - 3 -(1 row) - -SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; - count -------- - 1 -(1 row) - -SELECT * FROM drop_chunks_view ORDER BY 1; - time_bucket | count --------------+------- - 0 | 5 - 5 | 5 - 10 | 5 -(3 rows) - --- drop chunks when the chunksize and time_bucket aren't aligned -DROP TABLE drop_chunks_table CASCADE; -psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to 2 other objects -psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk -CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT hypertable_id AS drop_chunks_table_u_id - FROM create_distributed_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7, replication_factor => 2) \gset -\else -SELECT hypertable_id AS drop_chunks_table_u_id - FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset -\endif -CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; -$DIST$); -\endif -SELECT set_integer_now_func('drop_chunks_table_u', 'integer_now_test1'); - set_integer_now_func ----------------------- - -(1 row) - -CREATE MATERIALIZED VIEW drop_chunks_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('3', time), COUNT(data) - FROM drop_chunks_table_u - GROUP BY 1 WITH NO DATA; -SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table_u, - schema_name AS drop_chunks_mat_schema, - table_name AS drop_chunks_mat_table_u_name - FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg - WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id - AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset --- create 3 chunks, with 3 time bucket -INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; --- Refresh up to bucket 15 to match old materializer behavior -CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); -SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; - count -------- - 4 -(1 row) - -SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; - count -------- - 1 -(1 row) - -SELECT * FROM drop_chunks_view ORDER BY 1; - time_bucket | count --------------+------- - 0 | 3 - 3 | 3 - 6 | 3 - 9 | 3 - 12 | 3 -(5 rows) - --- TRUNCATE test --- Can truncate regular hypertables that have caggs -TRUNCATE drop_chunks_table_u; -\set ON_ERROR_STOP 0 --- Can't truncate materialized hypertables directly -TRUNCATE :drop_chunks_mat_table_u; -psql:include/cagg_ddl_common.sql:271: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate -\set ON_ERROR_STOP 1 --- Check that we don't interfere with TRUNCATE of normal table and --- partitioned table -CREATE TABLE truncate (value int); -INSERT INTO truncate VALUES (1), (2); -TRUNCATE truncate; 
-SELECT * FROM truncate; - value -------- -(0 rows) - -CREATE TABLE truncate_partitioned (value int) - PARTITION BY RANGE(value); -CREATE TABLE truncate_p1 PARTITION OF truncate_partitioned - FOR VALUES FROM (1) TO (3); -INSERT INTO truncate_partitioned VALUES (1), (2); -TRUNCATE truncate_partitioned; -SELECT * FROM truncate_partitioned; - value -------- -(0 rows) - --- ALTER TABLE tests -\set ON_ERROR_STOP 0 --- test a variety of ALTER TABLE statements -ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name; -psql:include/cagg_ddl_common.sql:291: ERROR: renaming columns on materialization tables is not supported -ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket); -psql:include/cagg_ddl_common.sql:292: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED; -psql:include/cagg_ddl_common.sql:293: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY; -psql:include/cagg_ddl_common.sql:294: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER; -psql:include/cagg_ddl_common.sql:295: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket; -psql:include/cagg_ddl_common.sql:296: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL; -psql:include/cagg_ddl_common.sql:297: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1; -psql:include/cagg_ddl_common.sql:298: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL; -psql:include/cagg_ddl_common.sql:299: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL; -psql:include/cagg_ddl_common.sql:300: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo; -psql:include/cagg_ddl_common.sql:301: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u NOT OF; -psql:include/cagg_ddl_common.sql:302: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER; -psql:include/cagg_ddl_common.sql:303: ERROR: operation not supported on materialization tables -\set ON_ERROR_STOP 1 -ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public; -ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SET client_min_messages TO NOTICE; -SELECT * FROM new_name; - time_bucket | count --------------+------- - 0 | 3 - 3 | 3 - 6 | 3 - 9 | 3 - 12 | 3 -(5 rows) - -SELECT * FROM drop_chunks_view ORDER BY 1; - time_bucket | count --------------+------- - 0 | 3 - 3 | 3 - 6 | 3 - 9 | 3 - 12 | 3 -(5 rows) - -\set ON_ERROR_STOP 0 --- no continuous aggregates on a continuous aggregate materialization table -CREATE MATERIALIZED VIEW new_name_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('6', time_bucket), COUNT("count") - FROM new_name - GROUP BY 1 WITH NO DATA; -psql:include/cagg_ddl_common.sql:326: ERROR: hypertable is a continuous aggregate materialization table -\set ON_ERROR_STOP 1 -CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, 
v2 float); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('metrics', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (8,public,metrics,t) -(1 row) - -\else -SELECT create_hypertable('metrics','time'); -\endif -INSERT INTO metrics SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75; --- check expressions in view definition -CREATE MATERIALIZED VIEW cagg_expr - WITH (timescaledb.continuous, timescaledb.materialized_only=true) -AS -SELECT - time_bucket('1d', time) AS time, - 'Const'::text AS Const, - 4.3::numeric AS "numeric", - first(metrics,time), - CASE WHEN true THEN 'foo' ELSE 'bar' END, - COALESCE(NULL,'coalesce'), - avg(v1) + avg(v2) AS avg1, - avg(v1+v2) AS avg2 -FROM metrics -GROUP BY 1 WITH NO DATA; -CALL refresh_continuous_aggregate('cagg_expr', NULL, NULL); -SELECT * FROM cagg_expr ORDER BY time LIMIT 5; - time | const | numeric | first | case | coalesce | avg1 | avg2 -------------------------------+-------+---------+----------------------------------------------+------+----------+------+------ - Fri Dec 31 16:00:00 1999 PST | Const | 4.3 | ("Sat Jan 01 00:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 - Sat Jan 01 16:00:00 2000 PST | Const | 4.3 | ("Sat Jan 01 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 - Sun Jan 02 16:00:00 2000 PST | Const | 4.3 | ("Sun Jan 02 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 - Mon Jan 03 16:00:00 2000 PST | Const | 4.3 | ("Mon Jan 03 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 - Tue Jan 04 16:00:00 2000 PST | Const | 4.3 | ("Tue Jan 04 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 -(5 rows) - ---test materialization of invalidation before drop -DROP TABLE IF EXISTS drop_chunks_table CASCADE; -psql:include/cagg_ddl_common.sql:358: NOTICE: table "drop_chunks_table" does not exist, skipping -DROP TABLE IF EXISTS drop_chunks_table_u CASCADE; -psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to 2 other objects -psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk -CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT hypertable_id AS drop_chunks_table_nid - FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset -\else -SELECT hypertable_id AS drop_chunks_table_nid - FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset -\endif -CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; -$DIST$); -\endif -SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test2'); - set_integer_now_func ----------------------- - -(1 row) - -CREATE MATERIALIZED VIEW drop_chunks_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('5', time), max(data) - FROM drop_chunks_table - GROUP BY 1 WITH NO DATA; -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; ---dropping chunks will process the invalidations -SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); - drop_chunks 
------------------------------------------------ - _timescaledb_internal._dist_hyper_10_13_chunk -(1 row) - -SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; - time | data -------+------ - 10 | 10 -(1 row) - -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(20, 35) AS i; -CALL refresh_continuous_aggregate('drop_chunks_view', 10, 40); ---this will be seen after the drop its within the invalidation window and will be dropped -INSERT INTO drop_chunks_table VALUES (26, 100); ---this will not be processed by the drop since chunk 30-39 is not dropped but will be seen after refresh ---shows that the drop doesn't do more work than necessary -INSERT INTO drop_chunks_table VALUES (31, 200); ---move the time up to 39 -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(35, 39) AS i; ---the chunks and ranges we have thus far -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table'; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_14_chunk | 10 | 20 - _dist_hyper_10_15_chunk | 20 | 30 - _dist_hyper_10_16_chunk | 30 | 40 -(3 rows) - ---the invalidation on 25 not yet seen -SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; - time_bucket | max --------------+----- - 35 | 35 - 30 | 34 - 25 | 29 - 20 | 24 - 15 | 19 - 10 | 14 -(6 rows) - ---refresh to process the invalidations and then drop -CALL refresh_continuous_aggregate('drop_chunks_view', NULL, (integer_now_test2()-9)); -SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); - drop_chunks ------------------------------------------------ - _timescaledb_internal._dist_hyper_10_14_chunk - _timescaledb_internal._dist_hyper_10_15_chunk -(2 rows) - ---new values on 25 now seen in view -SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; - time_bucket | max --------------+----- - 35 | 35 - 30 | 34 - 25 | 100 - 20 | 24 - 15 | 19 - 10 | 14 -(6 rows) - ---earliest datapoint now in table -SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; - time | data -------+------ - 30 | 30 -(1 row) - ---we see the chunks row with the dropped flags set; -SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped; - id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk -----+---------------+-----------------------+-------------------------+---------------------+---------+--------+----------- - 13 | 10 | _timescaledb_internal | _dist_hyper_10_13_chunk | | t | 0 | f - 14 | 10 | _timescaledb_internal | _dist_hyper_10_14_chunk | | t | 0 | f - 15 | 10 | _timescaledb_internal | _dist_hyper_10_15_chunk | | t | 0 | f -(3 rows) - ---still see data in the view -SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; - time_bucket | max --------------+----- - 25 | 100 - 20 | 24 - 15 | 19 - 10 | 14 -(4 rows) - ---no data but covers dropped chunks -SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; - time | data -------+------ -(0 rows) - ---recreate the dropped chunk -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; ---see data from recreated region -SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; - time | data -------+------ - 20 | 20 - 19 | 19 - 18 | 18 - 17 
| 17 - 16 | 16 - 15 | 15 - 14 | 14 - 13 | 13 - 12 | 12 - 11 | 11 - 10 | 10 - 9 | 9 - 8 | 8 - 7 | 7 - 6 | 6 - 5 | 5 - 4 | 4 - 3 | 3 - 2 | 2 - 1 | 1 - 0 | 0 -(21 rows) - ---should show chunk with old name and old ranges -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table' -ORDER BY range_start_integer; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_13_chunk | 0 | 10 - _dist_hyper_10_14_chunk | 10 | 20 - _dist_hyper_10_15_chunk | 20 | 30 - _dist_hyper_10_16_chunk | 30 | 40 -(4 rows) - ---We dropped everything up to the bucket starting at 30 and then ---inserted new data up to and including time 20. Therefore, the ---dropped data should stay the same as long as we only refresh ---buckets that have non-dropped data. -CALL refresh_continuous_aggregate('drop_chunks_view', 30, 40); -SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; - time_bucket | max --------------+----- - 35 | 39 - 30 | 200 - 25 | 100 - 20 | 24 - 15 | 19 - 10 | 14 -(6 rows) - -SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen, - schema_name AS drop_chunks_mat_schema, - table_name AS drop_chunks_mat_table_name - FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg - WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid - AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset --- TEST drop chunks from continuous aggregates by specifying view name -SELECT drop_chunks('drop_chunks_view', - newer_than => -20, - verbose => true); -psql:include/cagg_ddl_common.sql:454: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk - drop_chunks ------------------------------------------- - _timescaledb_internal._hyper_11_17_chunk -(1 row) - --- Test that we cannot drop chunks when specifying materialized --- hypertable -INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500; -CALL refresh_continuous_aggregate('drop_chunks_view', 45, 55); -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; - chunk_name | range_start_integer | range_end_integer ---------------------+---------------------+------------------- - _hyper_11_20_chunk | 0 | 100 -(1 row) - -\set ON_ERROR_STOP 0 -\set VERBOSITY default -SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60); -psql:include/cagg_ddl_common.sql:466: ERROR: operation not supported on materialized hypertable -DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable. -HINT: Try the operation on the continuous aggregate instead. -\set VERBOSITY terse -\set ON_ERROR_STOP 1 ------------------------------------------------------------------ --- Test that refresh_continuous_aggregate on chunk will refresh, --- but only in the regions covered by the show chunks. 
------------------------------------------------------------------ -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table' -ORDER BY 2,3; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_13_chunk | 0 | 10 - _dist_hyper_10_14_chunk | 10 | 20 - _dist_hyper_10_15_chunk | 20 | 30 - _dist_hyper_10_16_chunk | 30 | 40 - _dist_hyper_10_18_chunk | 40 | 50 - _dist_hyper_10_19_chunk | 50 | 60 -(6 rows) - --- Pick the second chunk as the one to drop -WITH numbered_chunks AS ( - SELECT row_number() OVER (ORDER BY range_start_integer), chunk_schema, chunk_name, range_start_integer, range_end_integer - FROM timescaledb_information.chunks - WHERE hypertable_name = 'drop_chunks_table' - ORDER BY 1 -) -SELECT format('%I.%I', chunk_schema, chunk_name) AS chunk_to_drop, range_start_integer, range_end_integer -FROM numbered_chunks -WHERE row_number = 2 \gset --- There's data in the table for the chunk/range we will drop -SELECT * FROM drop_chunks_table -WHERE time >= :range_start_integer -AND time < :range_end_integer -ORDER BY 1; - time | data -------+------ - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 -(10 rows) - --- Make sure there is also data in the continuous aggregate --- CARE: --- Note that this behaviour of dropping the materialization table chunks and expecting a refresh --- that overlaps that time range to NOT update those chunks is undefined. Since CAGGs over --- distributed hypertables merge the invalidations the refresh region is updated in the distributed --- case, which may be different than what happens in the normal hypertable case. 
The command was: --- SELECT drop_chunks('drop_chunks_view', newer_than => -20, verbose => true); -CALL refresh_continuous_aggregate('drop_chunks_view', 0, 50); -SELECT * FROM drop_chunks_view -ORDER BY 1; - time_bucket | max --------------+----- - 0 | 4 - 5 | 9 - 10 | 14 - 15 | 19 - 20 | 20 - 30 | 200 - 35 | 39 - 45 | 500 - 50 | 500 -(9 rows) - --- Drop the second chunk, to leave a gap in the data -\if :IS_DISTRIBUTED -CALL distributed_exec(format('DROP TABLE IF EXISTS %s', :'chunk_to_drop')); -DROP FOREIGN TABLE :chunk_to_drop; -\else -DROP TABLE :chunk_to_drop; -\endif --- Verify that the second chunk is dropped -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table' -ORDER BY 2,3; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_13_chunk | 0 | 10 - _dist_hyper_10_15_chunk | 20 | 30 - _dist_hyper_10_16_chunk | 30 | 40 - _dist_hyper_10_18_chunk | 40 | 50 - _dist_hyper_10_19_chunk | 50 | 60 -(5 rows) - --- Data is no longer in the table but still in the view -SELECT * FROM drop_chunks_table -WHERE time >= :range_start_integer -AND time < :range_end_integer -ORDER BY 1; - time | data -------+------ -(0 rows) - -SELECT * FROM drop_chunks_view -WHERE time_bucket >= :range_start_integer -AND time_bucket < :range_end_integer -ORDER BY 1; - time_bucket | max --------------+----- - 10 | 14 - 15 | 19 -(2 rows) - --- Insert a large value in one of the chunks that will be dropped -INSERT INTO drop_chunks_table VALUES (:range_start_integer-1, 100); --- Now refresh and drop the two adjecent chunks -CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); -SELECT drop_chunks('drop_chunks_table', older_than=>30); - drop_chunks ------------------------------------------------ - _timescaledb_internal._dist_hyper_10_13_chunk - _timescaledb_internal._dist_hyper_10_15_chunk -(2 rows) - --- Verify that the chunks are dropped -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table' -ORDER BY 2,3; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_16_chunk | 30 | 40 - _dist_hyper_10_18_chunk | 40 | 50 - _dist_hyper_10_19_chunk | 50 | 60 -(3 rows) - --- The continuous aggregate should be refreshed in the regions covered --- by the dropped chunks, but not in the "gap" region, i.e., the --- region of the chunk that was dropped via DROP TABLE. -SELECT * FROM drop_chunks_view -ORDER BY 1; - time_bucket | max --------------+----- - 0 | 4 - 5 | 100 - 20 | 20 - 30 | 200 - 35 | 39 - 45 | 500 - 50 | 500 -(7 rows) - --- Now refresh in the region of the first two dropped chunks -CALL refresh_continuous_aggregate('drop_chunks_view', 0, :range_end_integer); --- Aggregate data in the refreshed range should no longer exist since --- the underlying data was dropped. -SELECT * FROM drop_chunks_view -ORDER BY 1; - time_bucket | max --------------+----- - 20 | 20 - 30 | 200 - 35 | 39 - 45 | 500 - 50 | 500 -(5 rows) - --------------------------------------------------------------------- --- Check that we can create a materialized table in a tablespace. We --- create one with tablespace and one without and compare them. 
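For readers skimming the deleted expected output above, the comments it contains describe an asymmetry that is easy to miss: raw chunks removed through drop_chunks() leave invalidation records behind, so a later refresh over that window also clears the matching rows from the continuous aggregate, whereas a chunk removed directly with DROP TABLE bypasses the invalidation log and the refresh leaves that region of the aggregate as it was (the comments also note this may differ for distributed hypertables, where invalidations are merged). A minimal standalone sketch, not part of the patch, reusing the test's object names with illustrative ranges:

    -- Drop raw data through the TimescaleDB API; invalidations are recorded.
    SELECT drop_chunks('drop_chunks_table', older_than => 30);

    -- A refresh over the same window now removes the matching aggregate rows,
    -- since the underlying data is gone and the region was invalidated.
    CALL refresh_continuous_aggregate('drop_chunks_view', 0, 30);

    -- By contrast, a chunk dropped behind TimescaleDB's back with DROP TABLE
    -- records no invalidation, so a refresh leaves that "gap" region untouched.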
-CREATE VIEW cagg_info AS -WITH - caggs AS ( - SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view, - format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view, - format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view, - format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid - FROM _timescaledb_catalog.hypertable ht, - _timescaledb_catalog.continuous_agg cagg - WHERE ht.id = cagg.mat_hypertable_id - ) -SELECT user_view, - pg_get_userbyid(relowner) AS user_view_owner, - relname AS mat_table, - (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = mat_relid) AS mat_table_owner, - direct_view, - (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = direct_view) AS direct_view_owner, - partial_view, - (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = partial_view) AS partial_view_owner, - (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace - FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid; -GRANT SELECT ON cagg_info TO PUBLIC; -CREATE VIEW chunk_info AS -SELECT ht.schema_name, ht.table_name, relname AS chunk_name, - (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace - FROM pg_class c, - _timescaledb_catalog.hypertable ht, - _timescaledb_catalog.chunk ch - WHERE ch.table_name = c.relname AND ht.id = ch.hypertable_id; -CREATE TABLE whatever(time BIGINT NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT hypertable_id AS whatever_nid - FROM create_distributed_hypertable('whatever', 'time', chunk_time_interval => 10, replication_factor => 2) -\gset -\else -SELECT hypertable_id AS whatever_nid - FROM create_hypertable('whatever', 'time', chunk_time_interval => 10) -\gset -\endif -SELECT set_integer_now_func('whatever', 'integer_now_test'); - set_integer_now_func ----------------------- - -(1 row) - -CREATE MATERIALIZED VIEW whatever_view_1 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT time_bucket('5', time), COUNT(data) - FROM whatever GROUP BY 1 WITH NO DATA; -CREATE MATERIALIZED VIEW whatever_view_2 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) -TABLESPACE tablespace1 AS -SELECT time_bucket('5', time), COUNT(data) - FROM whatever GROUP BY 1 WITH NO DATA; -INSERT INTO whatever SELECT i, i FROM generate_series(0, 29) AS i; -CALL refresh_continuous_aggregate('whatever_view_1', NULL, NULL); -CALL refresh_continuous_aggregate('whatever_view_2', NULL, NULL); -SELECT user_view, - mat_table, - cagg_info.tablespace AS mat_tablespace, - chunk_name, - chunk_info.tablespace AS chunk_tablespace - FROM cagg_info, chunk_info - WHERE mat_table::text = table_name - AND user_view::text LIKE 'whatever_view%'; - user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace ------------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 -(2 rows) - -ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; -SELECT user_view, - mat_table, - cagg_info.tablespace AS mat_tablespace, - chunk_name, - chunk_info.tablespace AS chunk_tablespace - FROM cagg_info, chunk_info - WHERE mat_table::text = table_name - AND user_view::text LIKE 'whatever_view%'; - user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace 
------------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 -(2 rows) - -DROP MATERIALIZED VIEW whatever_view_1; -psql:include/cagg_ddl_common.sql:644: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk -DROP MATERIALIZED VIEW whatever_view_2; -psql:include/cagg_ddl_common.sql:645: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk --- test bucket width expressions on integer hypertables -CREATE TABLE metrics_int2 ( - time int2 NOT NULL, - device_id int, - v1 float, - v2 float -); -CREATE TABLE metrics_int4 ( - time int4 NOT NULL, - device_id int, - v1 float, - v2 float -); -CREATE TABLE metrics_int8 ( - time int8 NOT NULL, - device_id int, - v1 float, - v2 float -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10, replication_factor => 2) -FROM ( - VALUES ('int2'), - ('int4'), - ('int8')) v (dt); - create_distributed_hypertable -------------------------------- - (15,public,metrics_int2,t) - (16,public,metrics_int4,t) - (17,public,metrics_int8,t) -(3 rows) - -\else -SELECT create_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10) -FROM ( - VALUES ('int2'), - ('int4'), - ('int8')) v (dt); -\endif -CREATE OR REPLACE FUNCTION int2_now () - RETURNS int2 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int2 -$$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION int2_now () - RETURNS int2 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int2 -$$; -$DIST$); -\endif -CREATE OR REPLACE FUNCTION int4_now () - RETURNS int4 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int4 -$$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION int4_now () - RETURNS int4 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int4 -$$; -$DIST$); -\endif -CREATE OR REPLACE FUNCTION int8_now () - RETURNS int8 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int8 -$$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION int8_now () - RETURNS int8 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int8 -$$; -$DIST$); -\endif -SELECT set_integer_now_func (('metrics_' || dt)::regclass, (dt || '_now')::regproc) -FROM ( - VALUES ('int2'), - ('int4'), - ('int8')) v (dt); - set_integer_now_func ----------------------- - - - -(3 rows) - --- width expression for int2 hypertables -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1::smallint, time) -FROM metrics_int2 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:750: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1::smallint + 2::smallint, time) -FROM metrics_int2 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:757: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; --- width expression for int4 hypertables -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1, time) -FROM metrics_int4 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:765: NOTICE: continuous aggregate "width_expr" 
is already up-to-date -DROP MATERIALIZED VIEW width_expr; -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1 + 2, time) -FROM metrics_int4 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:772: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; --- width expression for int8 hypertables -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1, time) -FROM metrics_int8 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:780: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1 + 2, time) -FROM metrics_int8 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:787: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; -\set ON_ERROR_STOP 0 --- non-immutable expresions should be rejected -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(extract(year FROM now())::smallint, time) -FROM metrics_int2 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:796: ERROR: only immutable expressions allowed in time bucket function -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(extract(year FROM now())::int, time) -FROM metrics_int4 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(extract(year FROM now())::int, time) -FROM metrics_int8 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function -\set ON_ERROR_STOP 1 --- Test various ALTER MATERIALIZED VIEW statements. -SET ROLE :ROLE_DEFAULT_PERM_USER; -CREATE MATERIALIZED VIEW owner_check WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1 + 2, time) -FROM metrics_int8 -GROUP BY 1 -WITH NO DATA; -\x on -SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; --[ RECORD 1 ]------+--------------------------------------- -user_view | owner_check -user_view_owner | default_perm_user -mat_table | _materialized_hypertable_24 -mat_table_owner | default_perm_user -direct_view | _timescaledb_internal._direct_view_24 -direct_view_owner | default_perm_user -partial_view | _timescaledb_internal._partial_view_24 -partial_view_owner | default_perm_user -tablespace | - -\x off --- This should not work since the target user has the wrong role, but --- we test that the normal checks are done when changing the owner. 
-\set ON_ERROR_STOP 0 -ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; -psql:include/cagg_ddl_common.sql:826: ERROR: must be member of role "test_role_1" -\set ON_ERROR_STOP 1 --- Superuser can always change owner -SET ROLE :ROLE_CLUSTER_SUPERUSER; -ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; -\x on -SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; --[ RECORD 1 ]------+--------------------------------------- -user_view | owner_check -user_view_owner | test_role_1 -mat_table | _materialized_hypertable_24 -mat_table_owner | test_role_1 -direct_view | _timescaledb_internal._direct_view_24 -direct_view_owner | test_role_1 -partial_view | _timescaledb_internal._partial_view_24 -partial_view_owner | test_role_1 -tablespace | - -\x off --- --- Test drop continuous aggregate cases --- --- Issue: #2608 --- -CREATE OR REPLACE FUNCTION test_int_now() - RETURNS INT LANGUAGE SQL STABLE AS -$BODY$ - SELECT 50; -$BODY$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ - CREATE OR REPLACE FUNCTION test_int_now() - RETURNS INT LANGUAGE SQL STABLE AS - $BODY$ - SELECT 50; - $BODY$; -$DIST$); -\endif -CREATE TABLE conditionsnm(time_int INT NOT NULL, device INT, value FLOAT); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10, replication_factor => 2); - create_distributed_hypertable -------------------------------- - (25,public,conditionsnm,t) -(1 row) - -\else -SELECT create_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10); -\endif -SELECT set_integer_now_func('conditionsnm', 'test_int_now'); - set_integer_now_func ----------------------- - -(1 row) - -INSERT INTO conditionsnm -SELECT time_val, time_val % 4, 3.14 FROM generate_series(0,100,1) AS time_val; --- Case 1: DROP -CREATE MATERIALIZED VIEW conditionsnm_4 -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) -AS -SELECT time_bucket(7, time_int) as bucket, -SUM(value), COUNT(value) -FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:874: NOTICE: refreshing continuous aggregate "conditionsnm_4" -DROP materialized view conditionsnm_4; -psql:include/cagg_ddl_common.sql:876: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk --- Case 2: DROP CASCADE should have similar behaviour as DROP -CREATE MATERIALIZED VIEW conditionsnm_4 -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) -AS -SELECT time_bucket(7, time_int) as bucket, -SUM(value), COUNT(value) -FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:884: NOTICE: refreshing continuous aggregate "conditionsnm_4" -DROP materialized view conditionsnm_4 CASCADE; -psql:include/cagg_ddl_common.sql:886: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk --- Case 3: require CASCADE in case of dependent object -CREATE MATERIALIZED VIEW conditionsnm_4 -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) -AS -SELECT time_bucket(7, time_int) as bucket, -SUM(value), COUNT(value) -FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:894: NOTICE: refreshing continuous aggregate "conditionsnm_4" -CREATE VIEW see_cagg as select * from conditionsnm_4; -\set ON_ERROR_STOP 0 -DROP MATERIALIZED VIEW conditionsnm_4; -psql:include/cagg_ddl_common.sql:898: ERROR: cannot drop view conditionsnm_4 because other objects depend on it -\set ON_ERROR_STOP 1 --- Case 4: DROP CASCADE with dependency -DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; 
-psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to view see_cagg -psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk --- Test DROP SCHEMA CASCADE with continuous aggregates --- --- Issue: #2350 --- --- Case 1: DROP SCHEMA CASCADE -CREATE SCHEMA test_schema; -CREATE TABLE test_schema.telemetry_raw ( - ts TIMESTAMP WITH TIME ZONE NOT NULL, - value DOUBLE PRECISION -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); - create_distributed_hypertable ----------------------------------- - (29,test_schema,telemetry_raw,t) -(1 row) - -\else -SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); -\endif -CREATE MATERIALIZED VIEW test_schema.telemetry_1s - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS -SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, - avg(value) - FROM test_schema.telemetry_raw - GROUP BY ts_1s WITH NO DATA; -SELECT ca.raw_hypertable_id, - h.schema_name, - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'telemetry_1s'; - raw_hypertable_id | schema_name | MAT_TABLE_NAME | PART_VIEW_NAME | partial_view_schema --------------------+-----------------------+-----------------------------+------------------+----------------------- - 29 | _timescaledb_internal | _materialized_hypertable_30 | _partial_view_30 | _timescaledb_internal -(1 row) - -\gset -DROP SCHEMA test_schema CASCADE; -psql:include/cagg_ddl_common.sql:941: NOTICE: drop cascades to 4 other objects -SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = 'telemetry_1s'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; - count -------- - 0 -(1 row) - --- Case 2: DROP SCHEMA CASCADE with multiple caggs -CREATE SCHEMA test_schema; -CREATE TABLE test_schema.telemetry_raw ( - ts TIMESTAMP WITH TIME ZONE NOT NULL, - value DOUBLE PRECISION -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); - create_distributed_hypertable ----------------------------------- - (31,test_schema,telemetry_raw,t) -(1 row) - -\else -SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); -\endif -CREATE MATERIALIZED VIEW test_schema.cagg1 - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS -SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, - avg(value) - FROM test_schema.telemetry_raw - GROUP BY ts_1s WITH NO DATA; -CREATE MATERIALIZED VIEW test_schema.cagg2 - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS -SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, - avg(value) - FROM test_schema.telemetry_raw - GROUP BY ts_1s WITH NO DATA; -SELECT ca.raw_hypertable_id, - h.schema_name, - h.table_name AS "MAT_TABLE_NAME1", - partial_view_name as "PART_VIEW_NAME1", - partial_view_schema -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'cagg1'; - raw_hypertable_id | schema_name | MAT_TABLE_NAME1 | PART_VIEW_NAME1 | partial_view_schema 
--------------------+-----------------------+-----------------------------+------------------+----------------------- - 31 | _timescaledb_internal | _materialized_hypertable_32 | _partial_view_32 | _timescaledb_internal -(1 row) - -\gset -SELECT ca.raw_hypertable_id, - h.schema_name, - h.table_name AS "MAT_TABLE_NAME2", - partial_view_name as "PART_VIEW_NAME2", - partial_view_schema -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'cagg2'; - raw_hypertable_id | schema_name | MAT_TABLE_NAME2 | PART_VIEW_NAME2 | partial_view_schema --------------------+-----------------------+-----------------------------+------------------+----------------------- - 31 | _timescaledb_internal | _materialized_hypertable_33 | _partial_view_33 | _timescaledb_internal -(1 row) - -\gset -DROP SCHEMA test_schema CASCADE; -psql:include/cagg_ddl_common.sql:998: NOTICE: drop cascades to 7 other objects -SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME1'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = 'cagg1'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME2'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME2'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = 'cagg2'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; - count -------- - 0 -(1 row) - -DROP TABLESPACE tablespace1; -DROP TABLESPACE tablespace2; --- Check that we can rename a column of a materialized view and still --- rebuild it after (#3051, #3405) -CREATE TABLE conditions ( - time TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (34,public,conditions,t) -(1 row) - -\else -SELECT create_hypertable('conditions', 'time'); -\endif -INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); -INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); -INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); -INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); -INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); -INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); -INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); -INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); -INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); -INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); -INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); -CREATE MATERIALIZED VIEW conditions_daily -WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS -SELECT location, - time_bucket(INTERVAL '1 day', time) AS bucket, - AVG(temperature) - FROM conditions -GROUP BY location, bucket -WITH NO DATA; -SELECT format('%I.%I', '_timescaledb_internal', h.table_name) AS "MAT_TABLE_NAME", - format('%I.%I', '_timescaledb_internal', partial_view_name) AS "PART_VIEW_NAME", - format('%I.%I', '_timescaledb_internal', direct_view_name) AS "DIRECT_VIEW_NAME" -FROM 
_timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'conditions_daily' -\gset --- Show both the columns and the view definitions to see that --- references are correct in the view as well. -SELECT * FROM test.show_columns('conditions_daily'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - bucket | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - bucket | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - bucket | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - bucket | timestamp with time zone | t - avg | double precision | f -(3 rows) - -ALTER MATERIALIZED VIEW conditions_daily RENAME COLUMN bucket to "time"; --- Show both the columns and the view definitions to see that --- references are correct in the view as well. -SELECT * FROM test.show_columns(' conditions_daily'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - time | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - time | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - time | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - time | timestamp with time zone | t - avg | double precision | f -(3 rows) - --- This will rebuild the materialized view and should succeed. -ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only = false); --- Refresh the continuous aggregate to check that it works after the --- rename. 
-\set VERBOSITY verbose -CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); -\set VERBOSITY terse --- --- Indexes on continuous aggregate --- -\set ON_ERROR_STOP 0 --- unique indexes are not supported -CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location); -psql:include/cagg_ddl_common.sql:1084: ERROR: continuous aggregates do not support UNIQUE indexes --- concurrently index creation not supported -CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg); -psql:include/cagg_ddl_common.sql:1086: ERROR: hypertables do not support concurrent index creation -\set ON_ERROR_STOP 1 -CREATE INDEX index_avg ON conditions_daily (avg); -CREATE INDEX index_avg_only ON ONLY conditions_daily (avg); -CREATE INDEX index_avg_include ON conditions_daily (avg) INCLUDE (location); -CREATE INDEX index_avg_expr ON conditions_daily ((avg + 1)); -CREATE INDEX index_avg_location_sfo ON conditions_daily (avg) WHERE location = 'SFO'; -CREATE INDEX index_avg_expr_location_sfo ON conditions_daily ((avg + 2)) WHERE location = 'SFO'; -SELECT * FROM test.show_indexespred(:'MAT_TABLE_NAME'); - Index | Columns | Expr | Pred | Unique | Primary | Exclusion | Tablespace ------------------------------------------------------------------------+-------------------+---------------------------+------------------------+--------+---------+-----------+------------ - _timescaledb_internal._materialized_hypertable_35_bucket_idx | {bucket} | | | f | f | f | - _timescaledb_internal._materialized_hypertable_35_location_bucket_idx | {location,bucket} | | | f | f | f | - _timescaledb_internal.index_avg | {avg} | | | f | f | f | - _timescaledb_internal.index_avg_expr | {expr} | avg + 1::double precision | | f | f | f | - _timescaledb_internal.index_avg_expr_location_sfo | {expr} | avg + 2::double precision | location = 'SFO'::text | f | f | f | - _timescaledb_internal.index_avg_include | {avg,location} | | | f | f | f | - _timescaledb_internal.index_avg_location_sfo | {avg} | | location = 'SFO'::text | f | f | f | - _timescaledb_internal.index_avg_only | {avg} | | | f | f | f | -(8 rows) - --- #3696 assertion failure when referencing columns not present in result -CREATE TABLE i3696(time timestamptz NOT NULL, search_query text, cnt integer, cnt2 integer); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('i3696', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (36,public,i3696,t) -(1 row) - -\else -SELECT table_name FROM create_hypertable('i3696','time'); -\endif -CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) -AS - SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket - FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query; -psql:include/cagg_ddl_common.sql:1108: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date -ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true'); -CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false) -AS - SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket - FROM i3696 GROUP BY cnt + cnt2, bucket, search_query - HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; -psql:include/cagg_ddl_common.sql:1116: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date -ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 
'true'); ---TEST test with multiple settings on continuous aggregates -- --- test for materialized_only + compress combinations (real time aggs enabled initially) -CREATE TABLE test_setting(time timestamptz not null, val numeric); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('test_setting', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (39,public,test_setting,t) -(1 row) - -\else -SELECT create_hypertable('test_setting', 'time'); -\endif -CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false) -AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; -psql:include/cagg_ddl_common.sql:1130: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date -INSERT INTO test_setting -SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0; -CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - ---this row is not in the materialized result --- -INSERT INTO test_setting VALUES( '2020-11-01', 20); ---try out 2 settings here -- -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1141: NOTICE: defaulting compress_orderby to time_bucket -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | t | t -(1 row) - ---real time aggs is off now , should return 20 -- -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - ---now set it back to false -- -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1149: NOTICE: defaulting compress_orderby to time_bucket -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | t | f -(1 row) - ---count should return additional data since we have real time aggs on -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 21 -(1 row) - -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | f | t -(1 row) - ---real time aggs is off now , should return 20 -- -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only 
--------------------+---------------------+------------------- - test_setting_cagg | f | f -(1 row) - ---count should return additional data since we have real time aggs on -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 21 -(1 row) - -DELETE FROM test_setting WHERE val = 20; ---TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- --- test for materialized_only + compress combinations (real time aggs enabled initially) -DROP MATERIALIZED VIEW test_setting_cagg; -psql:include/cagg_ddl_common.sql:1174: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk -CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) -AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; -psql:include/cagg_ddl_common.sql:1177: NOTICE: refreshing continuous aggregate "test_setting_cagg" -CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - ---this row is not in the materialized result --- -INSERT INTO test_setting VALUES( '2020-11-01', 20); ---try out 2 settings here -- -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1185: NOTICE: defaulting compress_orderby to time_bucket -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | t | f -(1 row) - ---count should return additional data since we have real time aggs on -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 21 -(1 row) - ---now set it back to false -- -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1193: NOTICE: defaulting compress_orderby to time_bucket -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | t | t -(1 row) - ---real time aggs is off now , should return 20 -- -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | f | f -(1 row) - ---count should return additional data since we have real time aggs on -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 21 -(1 row) - -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only 
--------------------+---------------------+------------------- - test_setting_cagg | f | t -(1 row) - ---real time aggs is off now , should return 20 -- -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - --- END TEST with multiple settings --- Test View Target Entries that contain both aggrefs and Vars in the same expression -CREATE TABLE transactions -( - "time" timestamp with time zone NOT NULL, - dummy1 integer, - dummy2 integer, - dummy3 integer, - dummy4 integer, - dummy5 integer, - amount integer, - fiat_value integer -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('transactions', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (46,public,transactions,t) -(1 row) - -\else -SELECT create_hypertable('transactions', 'time'); -\endif -INSERT INTO transactions VALUES ( '2018-01-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-01-02 09:30:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-01-02 09:20:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-01-02 09:10:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 10:40:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 11:50:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 12:10:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 13:10:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-11-02 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-11-02 10:30:00-08', 0, 0, 0, 0, 0, -1, 10); -CREATE materialized view cashflows( - bucket, - amount, - cashflow, - cashflow2 -) WITH ( - timescaledb.continuous, - timescaledb.materialized_only = true -) AS -SELECT time_bucket ('1 day', time) AS bucket, - amount, - CASE - WHEN amount < 0 THEN (0 - sum(fiat_value)) - ELSE sum(fiat_value) - END AS cashflow, - amount + sum(fiat_value) -FROM transactions -GROUP BY bucket, amount; -psql:include/cagg_ddl_common.sql:1267: NOTICE: refreshing continuous aggregate "cashflows" -SELECT h.table_name AS "MAT_TABLE_NAME", - partial_view_name AS "PART_VIEW_NAME", - direct_view_name AS "DIRECT_VIEW_NAME" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'cashflows' -\gset --- Show both the columns and the view definitions to see that --- references are correct in the view as well. 
-\d+ "_timescaledb_internal".:"DIRECT_VIEW_NAME" - View "_timescaledb_internal._direct_view_47" - Column | Type | Collation | Nullable | Default | Storage | Description ------------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - amount | integer | | | | plain | - cashflow | bigint | | | | plain | - cashflow2 | bigint | | | | plain | -View definition: - SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, - transactions.amount, - CASE - WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) - ELSE sum(transactions.fiat_value) - END AS cashflow, - transactions.amount + sum(transactions.fiat_value) AS cashflow2 - FROM transactions - GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; - -\d+ "_timescaledb_internal".:"PART_VIEW_NAME" - View "_timescaledb_internal._partial_view_47" - Column | Type | Collation | Nullable | Default | Storage | Description ------------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - amount | integer | | | | plain | - cashflow | bigint | | | | plain | - cashflow2 | bigint | | | | plain | -View definition: - SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, - transactions.amount, - CASE - WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) - ELSE sum(transactions.fiat_value) - END AS cashflow, - transactions.amount + sum(transactions.fiat_value) AS cashflow2 - FROM transactions - GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; - -\d+ "_timescaledb_internal".:"MAT_TABLE_NAME" - Table "_timescaledb_internal._materialized_hypertable_47" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ------------+--------------------------+-----------+----------+---------+---------+--------------+------------- - bucket | timestamp with time zone | | not null | | plain | | - amount | integer | | | | plain | | - cashflow | bigint | | | | plain | | - cashflow2 | bigint | | | | plain | | -Indexes: - "_materialized_hypertable_47_amount_bucket_idx" btree (amount, bucket DESC) - "_materialized_hypertable_47_bucket_idx" btree (bucket DESC) -Triggers: - ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_47 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() -Child tables: _timescaledb_internal._hyper_47_52_chunk, - _timescaledb_internal._hyper_47_53_chunk - -\d+ 'cashflows' - View "public.cashflows" - Column | Type | Collation | Nullable | Default | Storage | Description ------------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - amount | integer | | | | plain | - cashflow | bigint | | | | plain | - cashflow2 | bigint | | | | plain | -View definition: - SELECT _materialized_hypertable_47.bucket, - _materialized_hypertable_47.amount, - _materialized_hypertable_47.cashflow, - _materialized_hypertable_47.cashflow2 - FROM _timescaledb_internal._materialized_hypertable_47; - -SELECT * FROM cashflows; - bucket | amount | cashflow | cashflow2 -------------------------------+--------+----------+----------- - Sun Dec 31 16:00:00 2017 PST | 1 | 10 | 11 - Mon Jan 01 16:00:00 2018 PST | -1 | -30 | 29 - Wed Oct 31 17:00:00 2018 PDT | -1 | -20 | 19 - Wed Oct 31 17:00:00 2018 PDT | 1 | 30 | 31 - Thu Nov 01 17:00:00 
2018 PDT | -1 | -10 | 9 - Thu Nov 01 17:00:00 2018 PDT | 1 | 10 | 11 -(6 rows) - --- test cagg creation with named arguments in time_bucket --- note that positional arguments cannot follow named arguments --- 1. test named origin --- 2. test named timezone --- 3. test named ts --- 4. test named bucket width --- named origin -CREATE MATERIALIZED VIEW cagg_named_origin WITH -(timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket('1h', time, 'UTC', origin => '2001-01-03 01:23:45') AS bucket, -avg(amount) as avg_amount -FROM transactions GROUP BY 1 WITH NO DATA; --- named timezone -CREATE MATERIALIZED VIEW cagg_named_tz_origin WITH -(timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket('1h', time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, -avg(amount) as avg_amount -FROM transactions GROUP BY 1 WITH NO DATA; --- named ts -CREATE MATERIALIZED VIEW cagg_named_ts_tz_origin WITH -(timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket('1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, -avg(amount) as avg_amount -FROM transactions GROUP BY 1 WITH NO DATA; --- named bucket width -CREATE MATERIALIZED VIEW cagg_named_all WITH -(timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(bucket_width => '1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, -avg(amount) as avg_amount -FROM transactions GROUP BY 1 WITH NO DATA; --- Refreshing from the beginning (NULL) of a CAGG with variable time bucket and --- using an INTERVAL for the end timestamp (issue #5534) -CREATE MATERIALIZED VIEW transactions_montly -WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS -SELECT time_bucket(INTERVAL '1 month', time) AS bucket, - SUM(fiat_value), - MAX(fiat_value), - MIN(fiat_value) - FROM transactions -GROUP BY 1 -WITH NO DATA; --- No rows -SELECT * FROM transactions_montly ORDER BY bucket; - bucket | sum | max | min ---------+-----+-----+----- -(0 rows) - --- Refresh from beginning of the CAGG for 1 month -CALL refresh_continuous_aggregate('transactions_montly', NULL, INTERVAL '1 month'); -SELECT * FROM transactions_montly ORDER BY bucket; - bucket | sum | max | min -------------------------------+-----+-----+----- - Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 - Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 -(2 rows) - -TRUNCATE transactions_montly; --- Partial refresh the CAGG from beginning to an specific timestamp -CALL refresh_continuous_aggregate('transactions_montly', NULL, '2018-11-01 11:50:00-08'::timestamptz); -SELECT * FROM transactions_montly ORDER BY bucket; - bucket | sum | max | min -------------------------------+-----+-----+----- - Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 -(1 row) - --- Full refresh the CAGG -CALL refresh_continuous_aggregate('transactions_montly', NULL, NULL); -SELECT * FROM transactions_montly ORDER BY bucket; - bucket | sum | max | min -------------------------------+-----+-----+----- - Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 - Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 -(2 rows) - --- Check set_chunk_time_interval on continuous aggregate -CREATE MATERIALIZED VIEW cagg_set_chunk_time_interval -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(INTERVAL '1 month', time) AS bucket, - SUM(fiat_value), - MAX(fiat_value), - MIN(fiat_value) -FROM transactions -GROUP BY 1 -WITH NO DATA; -SELECT 
set_chunk_time_interval('cagg_set_chunk_time_interval', chunk_time_interval => interval '1 month'); - set_chunk_time_interval -------------------------- - -(1 row) - -CALL refresh_continuous_aggregate('cagg_set_chunk_time_interval', NULL, NULL); -SELECT _timescaledb_functions.to_interval(d.interval_length) = interval '1 month' -FROM _timescaledb_catalog.dimension d - RIGHT JOIN _timescaledb_catalog.continuous_agg ca ON ca.user_view_name = 'cagg_set_chunk_time_interval' -WHERE d.hypertable_id = ca.mat_hypertable_id; - ?column? ----------- - t -(1 row) - --- Since #6077 CAggs are materialized only by default -DROP TABLE conditions CASCADE; -psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 3 other objects -psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 2 other objects -CREATE TABLE conditions ( - time TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (54,public,conditions,t) -(1 row) - -\else -SELECT create_hypertable('conditions', 'time'); -\endif -INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); -INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); -INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); -INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); -INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); -INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); -INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); -INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); -INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); -INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); -INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); -CREATE MATERIALIZED VIEW conditions_daily -WITH (timescaledb.continuous) AS -SELECT location, - time_bucket(INTERVAL '1 day', time) AS bucket, - AVG(temperature) - FROM conditions -GROUP BY location, bucket -WITH NO DATA; -\d+ conditions_daily - View "public.conditions_daily" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+--------------------------+-----------+----------+---------+----------+------------- - location | text | | | | extended | - bucket | timestamp with time zone | | | | plain | - avg | double precision | | | | plain | -View definition: - SELECT _materialized_hypertable_55.location, - _materialized_hypertable_55.bucket, - _materialized_hypertable_55.avg - FROM _timescaledb_internal._materialized_hypertable_55; - --- Should return NO ROWS -SELECT * FROM conditions_daily ORDER BY bucket, avg; - location | bucket | avg -----------+--------+----- -(0 rows) - -ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=false); -\d+ conditions_daily - View "public.conditions_daily" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+--------------------------+-----------+----------+---------+----------+------------- - location | text | | | | extended | - bucket | timestamp with time zone | | | | plain | - avg | double precision | | | | plain | -View definition: - SELECT _materialized_hypertable_55.location, - _materialized_hypertable_55.bucket, - _materialized_hypertable_55.avg - FROM _timescaledb_internal._materialized_hypertable_55 - WHERE 
_materialized_hypertable_55.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) -UNION ALL - SELECT conditions.location, - time_bucket('@ 1 day'::interval, conditions."time") AS bucket, - avg(conditions.temperature) AS avg - FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) - GROUP BY conditions.location, (time_bucket('@ 1 day'::interval, conditions."time")); - --- Should return ROWS because now it is realtime -SELECT * FROM conditions_daily ORDER BY bucket, avg; - location | bucket | avg -----------+------------------------------+----- - SFO | Sun Dec 31 16:00:00 2017 PST | 55 - NYC | Mon Jan 01 16:00:00 2018 PST | 65 - SFO | Mon Jan 01 16:00:00 2018 PST | 65 - por | Mon Jan 01 16:00:00 2018 PST | 100 - NYC | Wed Oct 31 17:00:00 2018 PDT | 65 - NYC | Thu Nov 01 17:00:00 2018 PDT | 15 -(6 rows) - --- Should return ROWS because we refreshed it -ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=true); -\d+ conditions_daily - View "public.conditions_daily" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+--------------------------+-----------+----------+---------+----------+------------- - location | text | | | | extended | - bucket | timestamp with time zone | | | | plain | - avg | double precision | | | | plain | -View definition: - SELECT _materialized_hypertable_55.location, - _materialized_hypertable_55.bucket, - _materialized_hypertable_55.avg - FROM _timescaledb_internal._materialized_hypertable_55; - -CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); -SELECT * FROM conditions_daily ORDER BY bucket, avg; - location | bucket | avg -----------+------------------------------+----- - SFO | Sun Dec 31 16:00:00 2017 PST | 55 - SFO | Mon Jan 01 16:00:00 2018 PST | 65 - NYC | Mon Jan 01 16:00:00 2018 PST | 65 - por | Mon Jan 01 16:00:00 2018 PST | 100 - NYC | Wed Oct 31 17:00:00 2018 PDT | 65 - NYC | Thu Nov 01 17:00:00 2018 PDT | 15 -(6 rows) - --- cleanup -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); -DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/cagg_ddl_dist_ht-14.out b/tsl/test/expected/cagg_ddl_dist_ht-14.out deleted file mode 100644 index 8c4e1394a03..00000000000 --- a/tsl/test/expected/cagg_ddl_dist_ht-14.out +++ /dev/null @@ -1,2207 +0,0 @@ --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. ------------------------------------- --- Set up a distributed environment ------------------------------------- -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -\set DATA_NODE_3 :TEST_DBNAME _3 -\ir include/remote_exec.sql --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. 
-CREATE SCHEMA IF NOT EXISTS test; -psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping -GRANT USAGE ON SCHEMA test TO PUBLIC; -CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) -RETURNS VOID -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' -LANGUAGE C; -CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) -RETURNS TABLE("table_record" CSTRING[]) -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' -LANGUAGE C; -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) -) a; - node_name | database | node_created | database_created | extension_created ------------------------+-----------------------+--------------+------------------+------------------- - db_cagg_ddl_dist_ht_1 | db_cagg_ddl_dist_ht_1 | t | t | t - db_cagg_ddl_dist_ht_2 | db_cagg_ddl_dist_ht_2 | t | t | t - db_cagg_ddl_dist_ht_3 | db_cagg_ddl_dist_ht_3 | t | t | t -(3 rows) - -GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; --- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes -GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; -\set IS_DISTRIBUTED TRUE -\ir include/cagg_ddl_common.sql --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. --- Set this variable to avoid using a hard-coded path each time query --- results are compared -\set QUERY_RESULT_TEST_EQUAL_RELPATH '../../../../test/sql/include/query_result_test_equal.sql' -\if :IS_DISTRIBUTED -\echo 'Running distributed hypertable tests' -Running distributed hypertable tests -\else -\echo 'Running local hypertable tests' -\endif -SET ROLE :ROLE_DEFAULT_PERM_USER; ---DDL commands on continuous aggregates -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature integer NULL, - humidity DOUBLE PRECISION NULL, - timemeasure TIMESTAMPTZ, - timeinterval INTERVAL -); -\if :IS_DISTRIBUTED -SELECT table_name FROM create_distributed_hypertable('conditions', 'timec', replication_factor => 2); - table_name ------------- - conditions -(1 row) - -\else -SELECT table_name FROM create_hypertable('conditions', 'timec'); -\endif --- schema tests -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; -CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; -CREATE SCHEMA rename_schema; -GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; -SET ROLE :ROLE_DEFAULT_PERM_USER; -CREATE TABLE foo(time TIMESTAMPTZ NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('foo', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (2,public,foo,t) -(1 row) - -\else -SELECT create_hypertable('foo', 'time'); -\endif -CREATE MATERIALIZED VIEW rename_test - WITH ( timescaledb.continuous, timescaledb.materialized_only=true) -AS SELECT time_bucket('1week', time), COUNT(data) - FROM foo - GROUP BY 1 WITH NO DATA; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | 
partial_view_schema | partial_view_name -------------------+----------------+-----------------------+------------------- - public | rename_test | _timescaledb_internal | _partial_view_3 -(1 row) - -ALTER MATERIALIZED VIEW rename_test SET SCHEMA rename_schema; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+-----------------------+------------------- - rename_schema | rename_test | _timescaledb_internal | _partial_view_3 -(1 row) - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA", - direct_view_name as "DIR_VIEW_NAME", - direct_view_schema as "DIR_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'rename_test' -\gset -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME" SET SCHEMA public; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+---------------------+------------------- - rename_schema | rename_test | public | _partial_view_3 -(1 row) - ---alter direct view schema -SELECT user_view_schema, user_view_name, direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | direct_view_schema | direct_view_name -------------------+----------------+-----------------------+------------------ - rename_schema | rename_test | _timescaledb_internal | _direct_view_3 -(1 row) - -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME" SET SCHEMA public; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name -------------------+----------------+---------------------+-------------------+--------------------+------------------ - rename_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 -(1 row) - -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER SCHEMA rename_schema RENAME TO new_name_schema; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name -------------------+----------------+---------------------+-------------------+--------------------+------------------ - new_name_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 -(1 row) - -ALTER VIEW :"PART_VIEW_NAME" SET SCHEMA new_name_schema; -ALTER VIEW :"DIR_VIEW_NAME" SET SCHEMA new_name_schema; -SELECT 
user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name -------------------+----------------+---------------------+-------------------+--------------------+------------------ - new_name_schema | rename_test | new_name_schema | _partial_view_3 | new_name_schema | _direct_view_3 -(1 row) - -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER SCHEMA new_name_schema RENAME TO foo_name_schema; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+---------------------+------------------- - foo_name_schema | rename_test | foo_name_schema | _partial_view_3 -(1 row) - -ALTER MATERIALIZED VIEW foo_name_schema.rename_test SET SCHEMA public; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+---------------------+------------------- - public | rename_test | foo_name_schema | _partial_view_3 -(1 row) - -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER SCHEMA foo_name_schema RENAME TO rename_schema; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SET client_min_messages TO NOTICE; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+---------------------+------------------- - public | rename_test | rename_schema | _partial_view_3 -(1 row) - -ALTER MATERIALIZED VIEW rename_test RENAME TO rename_c_aggregate; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+--------------------+---------------------+------------------- - public | rename_c_aggregate | rename_schema | _partial_view_3 -(1 row) - -SELECT * FROM rename_c_aggregate; - time_bucket | count --------------+------- -(0 rows) - -ALTER VIEW rename_schema.:"PART_VIEW_NAME" RENAME TO partial_view; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name -------------------+--------------------+---------------------+-------------------+--------------------+------------------ - public | rename_c_aggregate | rename_schema | partial_view | rename_schema | _direct_view_3 -(1 row) - ---rename direct view -ALTER VIEW rename_schema.:"DIR_VIEW_NAME" RENAME TO direct_view; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name 
-------------------+--------------------+---------------------+-------------------+--------------------+------------------ - public | rename_c_aggregate | rename_schema | partial_view | rename_schema | direct_view -(1 row) - --- drop_chunks tests -DROP TABLE conditions CASCADE; -DROP TABLE foo CASCADE; -psql:include/cagg_ddl_common.sql:161: NOTICE: drop cascades to 2 other objects -CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT hypertable_id AS drop_chunks_table_id - FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset -\else -SELECT hypertable_id AS drop_chunks_table_id - FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset -\endif -CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; -$DIST$); -\endif -SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test'); - set_integer_now_func ----------------------- - -(1 row) - -CREATE MATERIALIZED VIEW drop_chunks_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('5', time), COUNT(data) - FROM drop_chunks_table - GROUP BY 1 WITH NO DATA; -SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table, - schema_name AS drop_chunks_mat_schema, - table_name AS drop_chunks_mat_table_name - FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg - WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id - AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset --- create 3 chunks, with 3 time bucket -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; --- Only refresh up to bucket 15 initially. Matches the old refresh --- behavior that didn't materialize everything -CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); -SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; - count -------- - 3 -(1 row) - -SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; - count -------- - 1 -(1 row) - -SELECT * FROM drop_chunks_view ORDER BY 1; - time_bucket | count --------------+------- - 0 | 5 - 5 | 5 - 10 | 5 -(3 rows) - --- cannot drop directly from the materialization table without specifying --- cont. 
aggregate view name explicitly -\set ON_ERROR_STOP 0 -SELECT drop_chunks(:'drop_chunks_mat_table', - newer_than => -20, - verbose => true); -psql:include/cagg_ddl_common.sql:213: ERROR: operation not supported on materialized hypertable -\set ON_ERROR_STOP 1 -SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; - count -------- - 3 -(1 row) - -SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; - count -------- - 1 -(1 row) - -SELECT * FROM drop_chunks_view ORDER BY 1; - time_bucket | count --------------+------- - 0 | 5 - 5 | 5 - 10 | 5 -(3 rows) - --- drop chunks when the chunksize and time_bucket aren't aligned -DROP TABLE drop_chunks_table CASCADE; -psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to 2 other objects -psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk -CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT hypertable_id AS drop_chunks_table_u_id - FROM create_distributed_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7, replication_factor => 2) \gset -\else -SELECT hypertable_id AS drop_chunks_table_u_id - FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset -\endif -CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; -$DIST$); -\endif -SELECT set_integer_now_func('drop_chunks_table_u', 'integer_now_test1'); - set_integer_now_func ----------------------- - -(1 row) - -CREATE MATERIALIZED VIEW drop_chunks_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('3', time), COUNT(data) - FROM drop_chunks_table_u - GROUP BY 1 WITH NO DATA; -SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table_u, - schema_name AS drop_chunks_mat_schema, - table_name AS drop_chunks_mat_table_u_name - FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg - WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id - AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset --- create 3 chunks, with 3 time bucket -INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; --- Refresh up to bucket 15 to match old materializer behavior -CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); -SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; - count -------- - 4 -(1 row) - -SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; - count -------- - 1 -(1 row) - -SELECT * FROM drop_chunks_view ORDER BY 1; - time_bucket | count --------------+------- - 0 | 3 - 3 | 3 - 6 | 3 - 9 | 3 - 12 | 3 -(5 rows) - --- TRUNCATE test --- Can truncate regular hypertables that have caggs -TRUNCATE drop_chunks_table_u; -\set ON_ERROR_STOP 0 --- Can't truncate materialized hypertables directly -TRUNCATE :drop_chunks_mat_table_u; -psql:include/cagg_ddl_common.sql:271: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate -\set ON_ERROR_STOP 1 --- Check that we don't interfere with TRUNCATE of normal table and --- partitioned table -CREATE TABLE truncate (value int); -INSERT INTO truncate VALUES (1), (2); -TRUNCATE truncate; 
-SELECT * FROM truncate; - value -------- -(0 rows) - -CREATE TABLE truncate_partitioned (value int) - PARTITION BY RANGE(value); -CREATE TABLE truncate_p1 PARTITION OF truncate_partitioned - FOR VALUES FROM (1) TO (3); -INSERT INTO truncate_partitioned VALUES (1), (2); -TRUNCATE truncate_partitioned; -SELECT * FROM truncate_partitioned; - value -------- -(0 rows) - --- ALTER TABLE tests -\set ON_ERROR_STOP 0 --- test a variety of ALTER TABLE statements -ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name; -psql:include/cagg_ddl_common.sql:291: ERROR: renaming columns on materialization tables is not supported -ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket); -psql:include/cagg_ddl_common.sql:292: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED; -psql:include/cagg_ddl_common.sql:293: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY; -psql:include/cagg_ddl_common.sql:294: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER; -psql:include/cagg_ddl_common.sql:295: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket; -psql:include/cagg_ddl_common.sql:296: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL; -psql:include/cagg_ddl_common.sql:297: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1; -psql:include/cagg_ddl_common.sql:298: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL; -psql:include/cagg_ddl_common.sql:299: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL; -psql:include/cagg_ddl_common.sql:300: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo; -psql:include/cagg_ddl_common.sql:301: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u NOT OF; -psql:include/cagg_ddl_common.sql:302: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER; -psql:include/cagg_ddl_common.sql:303: ERROR: operation not supported on materialization tables -\set ON_ERROR_STOP 1 -ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public; -ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SET client_min_messages TO NOTICE; -SELECT * FROM new_name; - time_bucket | count --------------+------- - 0 | 3 - 3 | 3 - 6 | 3 - 9 | 3 - 12 | 3 -(5 rows) - -SELECT * FROM drop_chunks_view ORDER BY 1; - time_bucket | count --------------+------- - 0 | 3 - 3 | 3 - 6 | 3 - 9 | 3 - 12 | 3 -(5 rows) - -\set ON_ERROR_STOP 0 --- no continuous aggregates on a continuous aggregate materialization table -CREATE MATERIALIZED VIEW new_name_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('6', time_bucket), COUNT("count") - FROM new_name - GROUP BY 1 WITH NO DATA; -psql:include/cagg_ddl_common.sql:326: ERROR: hypertable is a continuous aggregate materialization table -\set ON_ERROR_STOP 1 -CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, 
v2 float); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('metrics', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (8,public,metrics,t) -(1 row) - -\else -SELECT create_hypertable('metrics','time'); -\endif -INSERT INTO metrics SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75; --- check expressions in view definition -CREATE MATERIALIZED VIEW cagg_expr - WITH (timescaledb.continuous, timescaledb.materialized_only=true) -AS -SELECT - time_bucket('1d', time) AS time, - 'Const'::text AS Const, - 4.3::numeric AS "numeric", - first(metrics,time), - CASE WHEN true THEN 'foo' ELSE 'bar' END, - COALESCE(NULL,'coalesce'), - avg(v1) + avg(v2) AS avg1, - avg(v1+v2) AS avg2 -FROM metrics -GROUP BY 1 WITH NO DATA; -CALL refresh_continuous_aggregate('cagg_expr', NULL, NULL); -SELECT * FROM cagg_expr ORDER BY time LIMIT 5; - time | const | numeric | first | case | coalesce | avg1 | avg2 -------------------------------+-------+---------+----------------------------------------------+------+----------+------+------ - Fri Dec 31 16:00:00 1999 PST | Const | 4.3 | ("Sat Jan 01 00:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 - Sat Jan 01 16:00:00 2000 PST | Const | 4.3 | ("Sat Jan 01 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 - Sun Jan 02 16:00:00 2000 PST | Const | 4.3 | ("Sun Jan 02 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 - Mon Jan 03 16:00:00 2000 PST | Const | 4.3 | ("Mon Jan 03 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 - Tue Jan 04 16:00:00 2000 PST | Const | 4.3 | ("Tue Jan 04 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 -(5 rows) - ---test materialization of invalidation before drop -DROP TABLE IF EXISTS drop_chunks_table CASCADE; -psql:include/cagg_ddl_common.sql:358: NOTICE: table "drop_chunks_table" does not exist, skipping -DROP TABLE IF EXISTS drop_chunks_table_u CASCADE; -psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to 2 other objects -psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk -CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT hypertable_id AS drop_chunks_table_nid - FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset -\else -SELECT hypertable_id AS drop_chunks_table_nid - FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset -\endif -CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; -$DIST$); -\endif -SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test2'); - set_integer_now_func ----------------------- - -(1 row) - -CREATE MATERIALIZED VIEW drop_chunks_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('5', time), max(data) - FROM drop_chunks_table - GROUP BY 1 WITH NO DATA; -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; ---dropping chunks will process the invalidations -SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); - drop_chunks 
------------------------------------------------ - _timescaledb_internal._dist_hyper_10_13_chunk -(1 row) - -SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; - time | data -------+------ - 10 | 10 -(1 row) - -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(20, 35) AS i; -CALL refresh_continuous_aggregate('drop_chunks_view', 10, 40); ---this will be seen after the drop its within the invalidation window and will be dropped -INSERT INTO drop_chunks_table VALUES (26, 100); ---this will not be processed by the drop since chunk 30-39 is not dropped but will be seen after refresh ---shows that the drop doesn't do more work than necessary -INSERT INTO drop_chunks_table VALUES (31, 200); ---move the time up to 39 -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(35, 39) AS i; ---the chunks and ranges we have thus far -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table'; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_14_chunk | 10 | 20 - _dist_hyper_10_15_chunk | 20 | 30 - _dist_hyper_10_16_chunk | 30 | 40 -(3 rows) - ---the invalidation on 25 not yet seen -SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; - time_bucket | max --------------+----- - 35 | 35 - 30 | 34 - 25 | 29 - 20 | 24 - 15 | 19 - 10 | 14 -(6 rows) - ---refresh to process the invalidations and then drop -CALL refresh_continuous_aggregate('drop_chunks_view', NULL, (integer_now_test2()-9)); -SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); - drop_chunks ------------------------------------------------ - _timescaledb_internal._dist_hyper_10_14_chunk - _timescaledb_internal._dist_hyper_10_15_chunk -(2 rows) - ---new values on 25 now seen in view -SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; - time_bucket | max --------------+----- - 35 | 35 - 30 | 34 - 25 | 100 - 20 | 24 - 15 | 19 - 10 | 14 -(6 rows) - ---earliest datapoint now in table -SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; - time | data -------+------ - 30 | 30 -(1 row) - ---we see the chunks row with the dropped flags set; -SELECT * FROM _timescaledb_catalog.chunk where dropped; - id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk -----+---------------+-----------------------+-------------------------+---------------------+---------+--------+----------- - 13 | 10 | _timescaledb_internal | _dist_hyper_10_13_chunk | | t | 0 | f - 14 | 10 | _timescaledb_internal | _dist_hyper_10_14_chunk | | t | 0 | f - 15 | 10 | _timescaledb_internal | _dist_hyper_10_15_chunk | | t | 0 | f -(3 rows) - ---still see data in the view -SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; - time_bucket | max --------------+----- - 25 | 100 - 20 | 24 - 15 | 19 - 10 | 14 -(4 rows) - ---no data but covers dropped chunks -SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; - time | data -------+------ -(0 rows) - ---recreate the dropped chunk -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; ---see data from recreated region -SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; - time | data -------+------ - 20 | 20 - 19 | 19 - 18 | 18 - 17 | 17 - 16 | 16 - 15 | 15 - 14 | 14 - 13 | 13 - 12 | 12 - 11 | 11 - 10 | 10 - 9 | 9 - 8 | 
8 - 7 | 7 - 6 | 6 - 5 | 5 - 4 | 4 - 3 | 3 - 2 | 2 - 1 | 1 - 0 | 0 -(21 rows) - ---should show chunk with old name and old ranges -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table' -ORDER BY range_start_integer; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_13_chunk | 0 | 10 - _dist_hyper_10_14_chunk | 10 | 20 - _dist_hyper_10_15_chunk | 20 | 30 - _dist_hyper_10_16_chunk | 30 | 40 -(4 rows) - ---We dropped everything up to the bucket starting at 30 and then ---inserted new data up to and including time 20. Therefore, the ---dropped data should stay the same as long as we only refresh ---buckets that have non-dropped data. -CALL refresh_continuous_aggregate('drop_chunks_view', 30, 40); -SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; - time_bucket | max --------------+----- - 35 | 39 - 30 | 200 - 25 | 100 - 20 | 24 - 15 | 19 - 10 | 14 -(6 rows) - -SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen, - schema_name AS drop_chunks_mat_schema, - table_name AS drop_chunks_mat_table_name - FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg - WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid - AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset --- TEST drop chunks from continuous aggregates by specifying view name -SELECT drop_chunks('drop_chunks_view', - newer_than => -20, - verbose => true); -psql:include/cagg_ddl_common.sql:454: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk - drop_chunks ------------------------------------------- - _timescaledb_internal._hyper_11_17_chunk -(1 row) - --- Test that we cannot drop chunks when specifying materialized --- hypertable -INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500; -CALL refresh_continuous_aggregate('drop_chunks_view', 45, 55); -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; - chunk_name | range_start_integer | range_end_integer ---------------------+---------------------+------------------- - _hyper_11_20_chunk | 0 | 100 -(1 row) - -\set ON_ERROR_STOP 0 -\set VERBOSITY default -SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60); -psql:include/cagg_ddl_common.sql:466: ERROR: operation not supported on materialized hypertable -DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable. -HINT: Try the operation on the continuous aggregate instead. -\set VERBOSITY terse -\set ON_ERROR_STOP 1 ------------------------------------------------------------------ --- Test that refresh_continuous_aggregate on chunk will refresh, --- but only in the regions covered by the show chunks. 
------------------------------------------------------------------ -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table' -ORDER BY 2,3; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_13_chunk | 0 | 10 - _dist_hyper_10_14_chunk | 10 | 20 - _dist_hyper_10_15_chunk | 20 | 30 - _dist_hyper_10_16_chunk | 30 | 40 - _dist_hyper_10_18_chunk | 40 | 50 - _dist_hyper_10_19_chunk | 50 | 60 -(6 rows) - --- Pick the second chunk as the one to drop -WITH numbered_chunks AS ( - SELECT row_number() OVER (ORDER BY range_start_integer), chunk_schema, chunk_name, range_start_integer, range_end_integer - FROM timescaledb_information.chunks - WHERE hypertable_name = 'drop_chunks_table' - ORDER BY 1 -) -SELECT format('%I.%I', chunk_schema, chunk_name) AS chunk_to_drop, range_start_integer, range_end_integer -FROM numbered_chunks -WHERE row_number = 2 \gset --- There's data in the table for the chunk/range we will drop -SELECT * FROM drop_chunks_table -WHERE time >= :range_start_integer -AND time < :range_end_integer -ORDER BY 1; - time | data -------+------ - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 -(10 rows) - --- Make sure there is also data in the continuous aggregate --- CARE: --- Note that this behaviour of dropping the materialization table chunks and expecting a refresh --- that overlaps that time range to NOT update those chunks is undefined. Since CAGGs over --- distributed hypertables merge the invalidations the refresh region is updated in the distributed --- case, which may be different than what happens in the normal hypertable case. 
The command was: --- SELECT drop_chunks('drop_chunks_view', newer_than => -20, verbose => true); -CALL refresh_continuous_aggregate('drop_chunks_view', 0, 50); -SELECT * FROM drop_chunks_view -ORDER BY 1; - time_bucket | max --------------+----- - 0 | 4 - 5 | 9 - 10 | 14 - 15 | 19 - 20 | 20 - 30 | 200 - 35 | 39 - 45 | 500 - 50 | 500 -(9 rows) - --- Drop the second chunk, to leave a gap in the data -\if :IS_DISTRIBUTED -CALL distributed_exec(format('DROP TABLE IF EXISTS %s', :'chunk_to_drop')); -DROP FOREIGN TABLE :chunk_to_drop; -\else -DROP TABLE :chunk_to_drop; -\endif --- Verify that the second chunk is dropped -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table' -ORDER BY 2,3; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_13_chunk | 0 | 10 - _dist_hyper_10_15_chunk | 20 | 30 - _dist_hyper_10_16_chunk | 30 | 40 - _dist_hyper_10_18_chunk | 40 | 50 - _dist_hyper_10_19_chunk | 50 | 60 -(5 rows) - --- Data is no longer in the table but still in the view -SELECT * FROM drop_chunks_table -WHERE time >= :range_start_integer -AND time < :range_end_integer -ORDER BY 1; - time | data -------+------ -(0 rows) - -SELECT * FROM drop_chunks_view -WHERE time_bucket >= :range_start_integer -AND time_bucket < :range_end_integer -ORDER BY 1; - time_bucket | max --------------+----- - 10 | 14 - 15 | 19 -(2 rows) - --- Insert a large value in one of the chunks that will be dropped -INSERT INTO drop_chunks_table VALUES (:range_start_integer-1, 100); --- Now refresh and drop the two adjecent chunks -CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); -SELECT drop_chunks('drop_chunks_table', older_than=>30); - drop_chunks ------------------------------------------------ - _timescaledb_internal._dist_hyper_10_13_chunk - _timescaledb_internal._dist_hyper_10_15_chunk -(2 rows) - --- Verify that the chunks are dropped -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table' -ORDER BY 2,3; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_16_chunk | 30 | 40 - _dist_hyper_10_18_chunk | 40 | 50 - _dist_hyper_10_19_chunk | 50 | 60 -(3 rows) - --- The continuous aggregate should be refreshed in the regions covered --- by the dropped chunks, but not in the "gap" region, i.e., the --- region of the chunk that was dropped via DROP TABLE. -SELECT * FROM drop_chunks_view -ORDER BY 1; - time_bucket | max --------------+----- - 0 | 4 - 5 | 100 - 20 | 20 - 30 | 200 - 35 | 39 - 45 | 500 - 50 | 500 -(7 rows) - --- Now refresh in the region of the first two dropped chunks -CALL refresh_continuous_aggregate('drop_chunks_view', 0, :range_end_integer); --- Aggregate data in the refreshed range should no longer exist since --- the underlying data was dropped. -SELECT * FROM drop_chunks_view -ORDER BY 1; - time_bucket | max --------------+----- - 20 | 20 - 30 | 200 - 35 | 39 - 45 | 500 - 50 | 500 -(5 rows) - --------------------------------------------------------------------- --- Check that we can create a materialized table in a tablespace. We --- create one with tablespace and one without and compare them. 
-CREATE VIEW cagg_info AS -WITH - caggs AS ( - SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view, - format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view, - format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view, - format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid - FROM _timescaledb_catalog.hypertable ht, - _timescaledb_catalog.continuous_agg cagg - WHERE ht.id = cagg.mat_hypertable_id - ) -SELECT user_view, - pg_get_userbyid(relowner) AS user_view_owner, - relname AS mat_table, - (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = mat_relid) AS mat_table_owner, - direct_view, - (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = direct_view) AS direct_view_owner, - partial_view, - (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = partial_view) AS partial_view_owner, - (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace - FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid; -GRANT SELECT ON cagg_info TO PUBLIC; -CREATE VIEW chunk_info AS -SELECT ht.schema_name, ht.table_name, relname AS chunk_name, - (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace - FROM pg_class c, - _timescaledb_catalog.hypertable ht, - _timescaledb_catalog.chunk ch - WHERE ch.table_name = c.relname AND ht.id = ch.hypertable_id; -CREATE TABLE whatever(time BIGINT NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT hypertable_id AS whatever_nid - FROM create_distributed_hypertable('whatever', 'time', chunk_time_interval => 10, replication_factor => 2) -\gset -\else -SELECT hypertable_id AS whatever_nid - FROM create_hypertable('whatever', 'time', chunk_time_interval => 10) -\gset -\endif -SELECT set_integer_now_func('whatever', 'integer_now_test'); - set_integer_now_func ----------------------- - -(1 row) - -CREATE MATERIALIZED VIEW whatever_view_1 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT time_bucket('5', time), COUNT(data) - FROM whatever GROUP BY 1 WITH NO DATA; -CREATE MATERIALIZED VIEW whatever_view_2 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) -TABLESPACE tablespace1 AS -SELECT time_bucket('5', time), COUNT(data) - FROM whatever GROUP BY 1 WITH NO DATA; -INSERT INTO whatever SELECT i, i FROM generate_series(0, 29) AS i; -CALL refresh_continuous_aggregate('whatever_view_1', NULL, NULL); -CALL refresh_continuous_aggregate('whatever_view_2', NULL, NULL); -SELECT user_view, - mat_table, - cagg_info.tablespace AS mat_tablespace, - chunk_name, - chunk_info.tablespace AS chunk_tablespace - FROM cagg_info, chunk_info - WHERE mat_table::text = table_name - AND user_view::text LIKE 'whatever_view%'; - user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace ------------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 -(2 rows) - -ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; -SELECT user_view, - mat_table, - cagg_info.tablespace AS mat_tablespace, - chunk_name, - chunk_info.tablespace AS chunk_tablespace - FROM cagg_info, chunk_info - WHERE mat_table::text = table_name - AND user_view::text LIKE 'whatever_view%'; - user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace 
------------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 -(2 rows) - -DROP MATERIALIZED VIEW whatever_view_1; -psql:include/cagg_ddl_common.sql:644: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk -DROP MATERIALIZED VIEW whatever_view_2; -psql:include/cagg_ddl_common.sql:645: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk --- test bucket width expressions on integer hypertables -CREATE TABLE metrics_int2 ( - time int2 NOT NULL, - device_id int, - v1 float, - v2 float -); -CREATE TABLE metrics_int4 ( - time int4 NOT NULL, - device_id int, - v1 float, - v2 float -); -CREATE TABLE metrics_int8 ( - time int8 NOT NULL, - device_id int, - v1 float, - v2 float -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10, replication_factor => 2) -FROM ( - VALUES ('int2'), - ('int4'), - ('int8')) v (dt); - create_distributed_hypertable -------------------------------- - (15,public,metrics_int2,t) - (16,public,metrics_int4,t) - (17,public,metrics_int8,t) -(3 rows) - -\else -SELECT create_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10) -FROM ( - VALUES ('int2'), - ('int4'), - ('int8')) v (dt); -\endif -CREATE OR REPLACE FUNCTION int2_now () - RETURNS int2 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int2 -$$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION int2_now () - RETURNS int2 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int2 -$$; -$DIST$); -\endif -CREATE OR REPLACE FUNCTION int4_now () - RETURNS int4 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int4 -$$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION int4_now () - RETURNS int4 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int4 -$$; -$DIST$); -\endif -CREATE OR REPLACE FUNCTION int8_now () - RETURNS int8 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int8 -$$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION int8_now () - RETURNS int8 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int8 -$$; -$DIST$); -\endif -SELECT set_integer_now_func (('metrics_' || dt)::regclass, (dt || '_now')::regproc) -FROM ( - VALUES ('int2'), - ('int4'), - ('int8')) v (dt); - set_integer_now_func ----------------------- - - - -(3 rows) - --- width expression for int2 hypertables -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1::smallint, time) -FROM metrics_int2 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:750: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1::smallint + 2::smallint, time) -FROM metrics_int2 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:757: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; --- width expression for int4 hypertables -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1, time) -FROM metrics_int4 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:765: NOTICE: continuous aggregate "width_expr" 
is already up-to-date -DROP MATERIALIZED VIEW width_expr; -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1 + 2, time) -FROM metrics_int4 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:772: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; --- width expression for int8 hypertables -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1, time) -FROM metrics_int8 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:780: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1 + 2, time) -FROM metrics_int8 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:787: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; -\set ON_ERROR_STOP 0 --- non-immutable expresions should be rejected -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(extract(year FROM now())::smallint, time) -FROM metrics_int2 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:796: ERROR: only immutable expressions allowed in time bucket function -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(extract(year FROM now())::int, time) -FROM metrics_int4 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(extract(year FROM now())::int, time) -FROM metrics_int8 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function -\set ON_ERROR_STOP 1 --- Test various ALTER MATERIALIZED VIEW statements. -SET ROLE :ROLE_DEFAULT_PERM_USER; -CREATE MATERIALIZED VIEW owner_check WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1 + 2, time) -FROM metrics_int8 -GROUP BY 1 -WITH NO DATA; -\x on -SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; --[ RECORD 1 ]------+--------------------------------------- -user_view | owner_check -user_view_owner | default_perm_user -mat_table | _materialized_hypertable_24 -mat_table_owner | default_perm_user -direct_view | _timescaledb_internal._direct_view_24 -direct_view_owner | default_perm_user -partial_view | _timescaledb_internal._partial_view_24 -partial_view_owner | default_perm_user -tablespace | - -\x off --- This should not work since the target user has the wrong role, but --- we test that the normal checks are done when changing the owner. 
-\set ON_ERROR_STOP 0 -ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; -psql:include/cagg_ddl_common.sql:826: ERROR: must be member of role "test_role_1" -\set ON_ERROR_STOP 1 --- Superuser can always change owner -SET ROLE :ROLE_CLUSTER_SUPERUSER; -ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; -\x on -SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; --[ RECORD 1 ]------+--------------------------------------- -user_view | owner_check -user_view_owner | test_role_1 -mat_table | _materialized_hypertable_24 -mat_table_owner | test_role_1 -direct_view | _timescaledb_internal._direct_view_24 -direct_view_owner | test_role_1 -partial_view | _timescaledb_internal._partial_view_24 -partial_view_owner | test_role_1 -tablespace | - -\x off --- --- Test drop continuous aggregate cases --- --- Issue: #2608 --- -CREATE OR REPLACE FUNCTION test_int_now() - RETURNS INT LANGUAGE SQL STABLE AS -$BODY$ - SELECT 50; -$BODY$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ - CREATE OR REPLACE FUNCTION test_int_now() - RETURNS INT LANGUAGE SQL STABLE AS - $BODY$ - SELECT 50; - $BODY$; -$DIST$); -\endif -CREATE TABLE conditionsnm(time_int INT NOT NULL, device INT, value FLOAT); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10, replication_factor => 2); - create_distributed_hypertable -------------------------------- - (25,public,conditionsnm,t) -(1 row) - -\else -SELECT create_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10); -\endif -SELECT set_integer_now_func('conditionsnm', 'test_int_now'); - set_integer_now_func ----------------------- - -(1 row) - -INSERT INTO conditionsnm -SELECT time_val, time_val % 4, 3.14 FROM generate_series(0,100,1) AS time_val; --- Case 1: DROP -CREATE MATERIALIZED VIEW conditionsnm_4 -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) -AS -SELECT time_bucket(7, time_int) as bucket, -SUM(value), COUNT(value) -FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:874: NOTICE: refreshing continuous aggregate "conditionsnm_4" -DROP materialized view conditionsnm_4; -psql:include/cagg_ddl_common.sql:876: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk --- Case 2: DROP CASCADE should have similar behaviour as DROP -CREATE MATERIALIZED VIEW conditionsnm_4 -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) -AS -SELECT time_bucket(7, time_int) as bucket, -SUM(value), COUNT(value) -FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:884: NOTICE: refreshing continuous aggregate "conditionsnm_4" -DROP materialized view conditionsnm_4 CASCADE; -psql:include/cagg_ddl_common.sql:886: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk --- Case 3: require CASCADE in case of dependent object -CREATE MATERIALIZED VIEW conditionsnm_4 -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) -AS -SELECT time_bucket(7, time_int) as bucket, -SUM(value), COUNT(value) -FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:894: NOTICE: refreshing continuous aggregate "conditionsnm_4" -CREATE VIEW see_cagg as select * from conditionsnm_4; -\set ON_ERROR_STOP 0 -DROP MATERIALIZED VIEW conditionsnm_4; -psql:include/cagg_ddl_common.sql:898: ERROR: cannot drop view conditionsnm_4 because other objects depend on it -\set ON_ERROR_STOP 1 --- Case 4: DROP CASCADE with dependency -DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; 
-psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to view see_cagg -psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk --- Test DROP SCHEMA CASCADE with continuous aggregates --- --- Issue: #2350 --- --- Case 1: DROP SCHEMA CASCADE -CREATE SCHEMA test_schema; -CREATE TABLE test_schema.telemetry_raw ( - ts TIMESTAMP WITH TIME ZONE NOT NULL, - value DOUBLE PRECISION -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); - create_distributed_hypertable ----------------------------------- - (29,test_schema,telemetry_raw,t) -(1 row) - -\else -SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); -\endif -CREATE MATERIALIZED VIEW test_schema.telemetry_1s - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS -SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, - avg(value) - FROM test_schema.telemetry_raw - GROUP BY ts_1s WITH NO DATA; -SELECT ca.raw_hypertable_id, - h.schema_name, - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'telemetry_1s'; - raw_hypertable_id | schema_name | MAT_TABLE_NAME | PART_VIEW_NAME | partial_view_schema --------------------+-----------------------+-----------------------------+------------------+----------------------- - 29 | _timescaledb_internal | _materialized_hypertable_30 | _partial_view_30 | _timescaledb_internal -(1 row) - -\gset -DROP SCHEMA test_schema CASCADE; -psql:include/cagg_ddl_common.sql:941: NOTICE: drop cascades to 4 other objects -SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = 'telemetry_1s'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; - count -------- - 0 -(1 row) - --- Case 2: DROP SCHEMA CASCADE with multiple caggs -CREATE SCHEMA test_schema; -CREATE TABLE test_schema.telemetry_raw ( - ts TIMESTAMP WITH TIME ZONE NOT NULL, - value DOUBLE PRECISION -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); - create_distributed_hypertable ----------------------------------- - (31,test_schema,telemetry_raw,t) -(1 row) - -\else -SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); -\endif -CREATE MATERIALIZED VIEW test_schema.cagg1 - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS -SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, - avg(value) - FROM test_schema.telemetry_raw - GROUP BY ts_1s WITH NO DATA; -CREATE MATERIALIZED VIEW test_schema.cagg2 - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS -SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, - avg(value) - FROM test_schema.telemetry_raw - GROUP BY ts_1s WITH NO DATA; -SELECT ca.raw_hypertable_id, - h.schema_name, - h.table_name AS "MAT_TABLE_NAME1", - partial_view_name as "PART_VIEW_NAME1", - partial_view_schema -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'cagg1'; - raw_hypertable_id | schema_name | MAT_TABLE_NAME1 | PART_VIEW_NAME1 | partial_view_schema 
--------------------+-----------------------+-----------------------------+------------------+----------------------- - 31 | _timescaledb_internal | _materialized_hypertable_32 | _partial_view_32 | _timescaledb_internal -(1 row) - -\gset -SELECT ca.raw_hypertable_id, - h.schema_name, - h.table_name AS "MAT_TABLE_NAME2", - partial_view_name as "PART_VIEW_NAME2", - partial_view_schema -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'cagg2'; - raw_hypertable_id | schema_name | MAT_TABLE_NAME2 | PART_VIEW_NAME2 | partial_view_schema --------------------+-----------------------+-----------------------------+------------------+----------------------- - 31 | _timescaledb_internal | _materialized_hypertable_33 | _partial_view_33 | _timescaledb_internal -(1 row) - -\gset -DROP SCHEMA test_schema CASCADE; -psql:include/cagg_ddl_common.sql:998: NOTICE: drop cascades to 7 other objects -SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME1'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = 'cagg1'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME2'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME2'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = 'cagg2'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; - count -------- - 0 -(1 row) - -DROP TABLESPACE tablespace1; -DROP TABLESPACE tablespace2; --- Check that we can rename a column of a materialized view and still --- rebuild it after (#3051, #3405) -CREATE TABLE conditions ( - time TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (34,public,conditions,t) -(1 row) - -\else -SELECT create_hypertable('conditions', 'time'); -\endif -INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); -INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); -INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); -INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); -INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); -INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); -INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); -INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); -INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); -INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); -INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); -CREATE MATERIALIZED VIEW conditions_daily -WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS -SELECT location, - time_bucket(INTERVAL '1 day', time) AS bucket, - AVG(temperature) - FROM conditions -GROUP BY location, bucket -WITH NO DATA; -SELECT format('%I.%I', '_timescaledb_internal', h.table_name) AS "MAT_TABLE_NAME", - format('%I.%I', '_timescaledb_internal', partial_view_name) AS "PART_VIEW_NAME", - format('%I.%I', '_timescaledb_internal', direct_view_name) AS "DIRECT_VIEW_NAME" -FROM 
_timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'conditions_daily' -\gset --- Show both the columns and the view definitions to see that --- references are correct in the view as well. -SELECT * FROM test.show_columns('conditions_daily'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - bucket | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - bucket | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - bucket | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - bucket | timestamp with time zone | t - avg | double precision | f -(3 rows) - -ALTER MATERIALIZED VIEW conditions_daily RENAME COLUMN bucket to "time"; --- Show both the columns and the view definitions to see that --- references are correct in the view as well. -SELECT * FROM test.show_columns(' conditions_daily'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - time | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - time | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - time | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - time | timestamp with time zone | t - avg | double precision | f -(3 rows) - --- This will rebuild the materialized view and should succeed. -ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only = false); --- Refresh the continuous aggregate to check that it works after the --- rename. 
-\set VERBOSITY verbose -CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); -\set VERBOSITY terse --- --- Indexes on continuous aggregate --- -\set ON_ERROR_STOP 0 --- unique indexes are not supported -CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location); -psql:include/cagg_ddl_common.sql:1084: ERROR: continuous aggregates do not support UNIQUE indexes --- concurrently index creation not supported -CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg); -psql:include/cagg_ddl_common.sql:1086: ERROR: hypertables do not support concurrent index creation -\set ON_ERROR_STOP 1 -CREATE INDEX index_avg ON conditions_daily (avg); -CREATE INDEX index_avg_only ON ONLY conditions_daily (avg); -CREATE INDEX index_avg_include ON conditions_daily (avg) INCLUDE (location); -CREATE INDEX index_avg_expr ON conditions_daily ((avg + 1)); -CREATE INDEX index_avg_location_sfo ON conditions_daily (avg) WHERE location = 'SFO'; -CREATE INDEX index_avg_expr_location_sfo ON conditions_daily ((avg + 2)) WHERE location = 'SFO'; -SELECT * FROM test.show_indexespred(:'MAT_TABLE_NAME'); - Index | Columns | Expr | Pred | Unique | Primary | Exclusion | Tablespace ------------------------------------------------------------------------+-------------------+---------------------------+------------------------+--------+---------+-----------+------------ - _timescaledb_internal._materialized_hypertable_35_bucket_idx | {bucket} | | | f | f | f | - _timescaledb_internal._materialized_hypertable_35_location_bucket_idx | {location,bucket} | | | f | f | f | - _timescaledb_internal.index_avg | {avg} | | | f | f | f | - _timescaledb_internal.index_avg_expr | {expr} | avg + 1::double precision | | f | f | f | - _timescaledb_internal.index_avg_expr_location_sfo | {expr} | avg + 2::double precision | location = 'SFO'::text | f | f | f | - _timescaledb_internal.index_avg_include | {avg,location} | | | f | f | f | - _timescaledb_internal.index_avg_location_sfo | {avg} | | location = 'SFO'::text | f | f | f | - _timescaledb_internal.index_avg_only | {avg} | | | f | f | f | -(8 rows) - --- #3696 assertion failure when referencing columns not present in result -CREATE TABLE i3696(time timestamptz NOT NULL, search_query text, cnt integer, cnt2 integer); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('i3696', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (36,public,i3696,t) -(1 row) - -\else -SELECT table_name FROM create_hypertable('i3696','time'); -\endif -CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) -AS - SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket - FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query; -psql:include/cagg_ddl_common.sql:1108: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date -ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true'); -CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false) -AS - SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket - FROM i3696 GROUP BY cnt + cnt2, bucket, search_query - HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; -psql:include/cagg_ddl_common.sql:1116: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date -ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 
'true'); ---TEST test with multiple settings on continuous aggregates -- --- test for materialized_only + compress combinations (real time aggs enabled initially) -CREATE TABLE test_setting(time timestamptz not null, val numeric); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('test_setting', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (39,public,test_setting,t) -(1 row) - -\else -SELECT create_hypertable('test_setting', 'time'); -\endif -CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false) -AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; -psql:include/cagg_ddl_common.sql:1130: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date -INSERT INTO test_setting -SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0; -CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - ---this row is not in the materialized result --- -INSERT INTO test_setting VALUES( '2020-11-01', 20); ---try out 2 settings here -- -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1141: NOTICE: defaulting compress_orderby to time_bucket -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | t | t -(1 row) - ---real time aggs is off now , should return 20 -- -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - ---now set it back to false -- -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1149: NOTICE: defaulting compress_orderby to time_bucket -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | t | f -(1 row) - ---count should return additional data since we have real time aggs on -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 21 -(1 row) - -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | f | t -(1 row) - ---real time aggs is off now , should return 20 -- -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only 
--------------------+---------------------+------------------- - test_setting_cagg | f | f -(1 row) - ---count should return additional data since we have real time aggs on -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 21 -(1 row) - -DELETE FROM test_setting WHERE val = 20; ---TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- --- test for materialized_only + compress combinations (real time aggs enabled initially) -DROP MATERIALIZED VIEW test_setting_cagg; -psql:include/cagg_ddl_common.sql:1174: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk -CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) -AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; -psql:include/cagg_ddl_common.sql:1177: NOTICE: refreshing continuous aggregate "test_setting_cagg" -CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - ---this row is not in the materialized result --- -INSERT INTO test_setting VALUES( '2020-11-01', 20); ---try out 2 settings here -- -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1185: NOTICE: defaulting compress_orderby to time_bucket -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | t | f -(1 row) - ---count should return additional data since we have real time aggs on -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 21 -(1 row) - ---now set it back to false -- -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1193: NOTICE: defaulting compress_orderby to time_bucket -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | t | t -(1 row) - ---real time aggs is off now , should return 20 -- -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | f | f -(1 row) - ---count should return additional data since we have real time aggs on -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 21 -(1 row) - -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only 
--------------------+---------------------+------------------- - test_setting_cagg | f | t -(1 row) - ---real time aggs is off now , should return 20 -- -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - --- END TEST with multiple settings --- Test View Target Entries that contain both aggrefs and Vars in the same expression -CREATE TABLE transactions -( - "time" timestamp with time zone NOT NULL, - dummy1 integer, - dummy2 integer, - dummy3 integer, - dummy4 integer, - dummy5 integer, - amount integer, - fiat_value integer -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('transactions', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (46,public,transactions,t) -(1 row) - -\else -SELECT create_hypertable('transactions', 'time'); -\endif -INSERT INTO transactions VALUES ( '2018-01-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-01-02 09:30:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-01-02 09:20:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-01-02 09:10:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 10:40:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 11:50:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 12:10:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 13:10:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-11-02 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-11-02 10:30:00-08', 0, 0, 0, 0, 0, -1, 10); -CREATE materialized view cashflows( - bucket, - amount, - cashflow, - cashflow2 -) WITH ( - timescaledb.continuous, - timescaledb.materialized_only = true -) AS -SELECT time_bucket ('1 day', time) AS bucket, - amount, - CASE - WHEN amount < 0 THEN (0 - sum(fiat_value)) - ELSE sum(fiat_value) - END AS cashflow, - amount + sum(fiat_value) -FROM transactions -GROUP BY bucket, amount; -psql:include/cagg_ddl_common.sql:1267: NOTICE: refreshing continuous aggregate "cashflows" -SELECT h.table_name AS "MAT_TABLE_NAME", - partial_view_name AS "PART_VIEW_NAME", - direct_view_name AS "DIRECT_VIEW_NAME" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'cashflows' -\gset --- Show both the columns and the view definitions to see that --- references are correct in the view as well. 
-\d+ "_timescaledb_internal".:"DIRECT_VIEW_NAME" - View "_timescaledb_internal._direct_view_47" - Column | Type | Collation | Nullable | Default | Storage | Description ------------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - amount | integer | | | | plain | - cashflow | bigint | | | | plain | - cashflow2 | bigint | | | | plain | -View definition: - SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, - transactions.amount, - CASE - WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) - ELSE sum(transactions.fiat_value) - END AS cashflow, - transactions.amount + sum(transactions.fiat_value) AS cashflow2 - FROM transactions - GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; - -\d+ "_timescaledb_internal".:"PART_VIEW_NAME" - View "_timescaledb_internal._partial_view_47" - Column | Type | Collation | Nullable | Default | Storage | Description ------------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - amount | integer | | | | plain | - cashflow | bigint | | | | plain | - cashflow2 | bigint | | | | plain | -View definition: - SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, - transactions.amount, - CASE - WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) - ELSE sum(transactions.fiat_value) - END AS cashflow, - transactions.amount + sum(transactions.fiat_value) AS cashflow2 - FROM transactions - GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; - -\d+ "_timescaledb_internal".:"MAT_TABLE_NAME" - Table "_timescaledb_internal._materialized_hypertable_47" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ------------+--------------------------+-----------+----------+---------+---------+--------------+------------- - bucket | timestamp with time zone | | not null | | plain | | - amount | integer | | | | plain | | - cashflow | bigint | | | | plain | | - cashflow2 | bigint | | | | plain | | -Indexes: - "_materialized_hypertable_47_amount_bucket_idx" btree (amount, bucket DESC) - "_materialized_hypertable_47_bucket_idx" btree (bucket DESC) -Triggers: - ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_47 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() -Child tables: _timescaledb_internal._hyper_47_52_chunk, - _timescaledb_internal._hyper_47_53_chunk - -\d+ 'cashflows' - View "public.cashflows" - Column | Type | Collation | Nullable | Default | Storage | Description ------------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - amount | integer | | | | plain | - cashflow | bigint | | | | plain | - cashflow2 | bigint | | | | plain | -View definition: - SELECT _materialized_hypertable_47.bucket, - _materialized_hypertable_47.amount, - _materialized_hypertable_47.cashflow, - _materialized_hypertable_47.cashflow2 - FROM _timescaledb_internal._materialized_hypertable_47; - -SELECT * FROM cashflows; - bucket | amount | cashflow | cashflow2 -------------------------------+--------+----------+----------- - Sun Dec 31 16:00:00 2017 PST | 1 | 10 | 11 - Mon Jan 01 16:00:00 2018 PST | -1 | -30 | 29 - Wed Oct 31 17:00:00 2018 PDT | -1 | -20 | 19 - Wed Oct 31 17:00:00 2018 PDT | 1 | 30 | 31 - Thu Nov 01 17:00:00 
2018 PDT | -1 | -10 | 9 - Thu Nov 01 17:00:00 2018 PDT | 1 | 10 | 11 -(6 rows) - --- test cagg creation with named arguments in time_bucket --- note that positional arguments cannot follow named arguments --- 1. test named origin --- 2. test named timezone --- 3. test named ts --- 4. test named bucket width --- named origin -CREATE MATERIALIZED VIEW cagg_named_origin WITH -(timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket('1h', time, 'UTC', origin => '2001-01-03 01:23:45') AS bucket, -avg(amount) as avg_amount -FROM transactions GROUP BY 1 WITH NO DATA; --- named timezone -CREATE MATERIALIZED VIEW cagg_named_tz_origin WITH -(timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket('1h', time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, -avg(amount) as avg_amount -FROM transactions GROUP BY 1 WITH NO DATA; --- named ts -CREATE MATERIALIZED VIEW cagg_named_ts_tz_origin WITH -(timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket('1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, -avg(amount) as avg_amount -FROM transactions GROUP BY 1 WITH NO DATA; --- named bucket width -CREATE MATERIALIZED VIEW cagg_named_all WITH -(timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(bucket_width => '1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, -avg(amount) as avg_amount -FROM transactions GROUP BY 1 WITH NO DATA; --- Refreshing from the beginning (NULL) of a CAGG with variable time bucket and --- using an INTERVAL for the end timestamp (issue #5534) -CREATE MATERIALIZED VIEW transactions_montly -WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS -SELECT time_bucket(INTERVAL '1 month', time) AS bucket, - SUM(fiat_value), - MAX(fiat_value), - MIN(fiat_value) - FROM transactions -GROUP BY 1 -WITH NO DATA; --- No rows -SELECT * FROM transactions_montly ORDER BY bucket; - bucket | sum | max | min ---------+-----+-----+----- -(0 rows) - --- Refresh from beginning of the CAGG for 1 month -CALL refresh_continuous_aggregate('transactions_montly', NULL, INTERVAL '1 month'); -SELECT * FROM transactions_montly ORDER BY bucket; - bucket | sum | max | min -------------------------------+-----+-----+----- - Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 - Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 -(2 rows) - -TRUNCATE transactions_montly; --- Partial refresh the CAGG from beginning to an specific timestamp -CALL refresh_continuous_aggregate('transactions_montly', NULL, '2018-11-01 11:50:00-08'::timestamptz); -SELECT * FROM transactions_montly ORDER BY bucket; - bucket | sum | max | min -------------------------------+-----+-----+----- - Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 -(1 row) - --- Full refresh the CAGG -CALL refresh_continuous_aggregate('transactions_montly', NULL, NULL); -SELECT * FROM transactions_montly ORDER BY bucket; - bucket | sum | max | min -------------------------------+-----+-----+----- - Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 - Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 -(2 rows) - --- Check set_chunk_time_interval on continuous aggregate -CREATE MATERIALIZED VIEW cagg_set_chunk_time_interval -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(INTERVAL '1 month', time) AS bucket, - SUM(fiat_value), - MAX(fiat_value), - MIN(fiat_value) -FROM transactions -GROUP BY 1 -WITH NO DATA; -SELECT 
set_chunk_time_interval('cagg_set_chunk_time_interval', chunk_time_interval => interval '1 month'); - set_chunk_time_interval -------------------------- - -(1 row) - -CALL refresh_continuous_aggregate('cagg_set_chunk_time_interval', NULL, NULL); -SELECT _timescaledb_functions.to_interval(d.interval_length) = interval '1 month' -FROM _timescaledb_catalog.dimension d - RIGHT JOIN _timescaledb_catalog.continuous_agg ca ON ca.user_view_name = 'cagg_set_chunk_time_interval' -WHERE d.hypertable_id = ca.mat_hypertable_id; - ?column? ----------- - t -(1 row) - --- Since #6077 CAggs are materialized only by default -DROP TABLE conditions CASCADE; -psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 3 other objects -psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 2 other objects -CREATE TABLE conditions ( - time TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (54,public,conditions,t) -(1 row) - -\else -SELECT create_hypertable('conditions', 'time'); -\endif -INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); -INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); -INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); -INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); -INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); -INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); -INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); -INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); -INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); -INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); -INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); -CREATE MATERIALIZED VIEW conditions_daily -WITH (timescaledb.continuous) AS -SELECT location, - time_bucket(INTERVAL '1 day', time) AS bucket, - AVG(temperature) - FROM conditions -GROUP BY location, bucket -WITH NO DATA; -\d+ conditions_daily - View "public.conditions_daily" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+--------------------------+-----------+----------+---------+----------+------------- - location | text | | | | extended | - bucket | timestamp with time zone | | | | plain | - avg | double precision | | | | plain | -View definition: - SELECT _materialized_hypertable_55.location, - _materialized_hypertable_55.bucket, - _materialized_hypertable_55.avg - FROM _timescaledb_internal._materialized_hypertable_55; - --- Should return NO ROWS -SELECT * FROM conditions_daily ORDER BY bucket, avg; - location | bucket | avg -----------+--------+----- -(0 rows) - -ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=false); -\d+ conditions_daily - View "public.conditions_daily" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+--------------------------+-----------+----------+---------+----------+------------- - location | text | | | | extended | - bucket | timestamp with time zone | | | | plain | - avg | double precision | | | | plain | -View definition: - SELECT _materialized_hypertable_55.location, - _materialized_hypertable_55.bucket, - _materialized_hypertable_55.avg - FROM _timescaledb_internal._materialized_hypertable_55 - WHERE 
_materialized_hypertable_55.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) -UNION ALL - SELECT conditions.location, - time_bucket('@ 1 day'::interval, conditions."time") AS bucket, - avg(conditions.temperature) AS avg - FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) - GROUP BY conditions.location, (time_bucket('@ 1 day'::interval, conditions."time")); - --- Should return ROWS because now it is realtime -SELECT * FROM conditions_daily ORDER BY bucket, avg; - location | bucket | avg -----------+------------------------------+----- - SFO | Sun Dec 31 16:00:00 2017 PST | 55 - NYC | Mon Jan 01 16:00:00 2018 PST | 65 - SFO | Mon Jan 01 16:00:00 2018 PST | 65 - por | Mon Jan 01 16:00:00 2018 PST | 100 - NYC | Wed Oct 31 17:00:00 2018 PDT | 65 - NYC | Thu Nov 01 17:00:00 2018 PDT | 15 -(6 rows) - --- Should return ROWS because we refreshed it -ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=true); -\d+ conditions_daily - View "public.conditions_daily" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+--------------------------+-----------+----------+---------+----------+------------- - location | text | | | | extended | - bucket | timestamp with time zone | | | | plain | - avg | double precision | | | | plain | -View definition: - SELECT _materialized_hypertable_55.location, - _materialized_hypertable_55.bucket, - _materialized_hypertable_55.avg - FROM _timescaledb_internal._materialized_hypertable_55; - -CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); -SELECT * FROM conditions_daily ORDER BY bucket, avg; - location | bucket | avg -----------+------------------------------+----- - SFO | Sun Dec 31 16:00:00 2017 PST | 55 - SFO | Mon Jan 01 16:00:00 2018 PST | 65 - NYC | Mon Jan 01 16:00:00 2018 PST | 65 - por | Mon Jan 01 16:00:00 2018 PST | 100 - NYC | Wed Oct 31 17:00:00 2018 PDT | 65 - NYC | Thu Nov 01 17:00:00 2018 PDT | 15 -(6 rows) - --- cleanup -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); -DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/cagg_ddl_dist_ht-15.out b/tsl/test/expected/cagg_ddl_dist_ht-15.out deleted file mode 100644 index 8c4e1394a03..00000000000 --- a/tsl/test/expected/cagg_ddl_dist_ht-15.out +++ /dev/null @@ -1,2207 +0,0 @@ --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. ------------------------------------- --- Set up a distributed environment ------------------------------------- -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -\set DATA_NODE_3 :TEST_DBNAME _3 -\ir include/remote_exec.sql --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. 
-CREATE SCHEMA IF NOT EXISTS test; -psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping -GRANT USAGE ON SCHEMA test TO PUBLIC; -CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) -RETURNS VOID -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' -LANGUAGE C; -CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) -RETURNS TABLE("table_record" CSTRING[]) -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' -LANGUAGE C; -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) -) a; - node_name | database | node_created | database_created | extension_created ------------------------+-----------------------+--------------+------------------+------------------- - db_cagg_ddl_dist_ht_1 | db_cagg_ddl_dist_ht_1 | t | t | t - db_cagg_ddl_dist_ht_2 | db_cagg_ddl_dist_ht_2 | t | t | t - db_cagg_ddl_dist_ht_3 | db_cagg_ddl_dist_ht_3 | t | t | t -(3 rows) - -GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; --- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes -GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; -\set IS_DISTRIBUTED TRUE -\ir include/cagg_ddl_common.sql --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. --- Set this variable to avoid using a hard-coded path each time query --- results are compared -\set QUERY_RESULT_TEST_EQUAL_RELPATH '../../../../test/sql/include/query_result_test_equal.sql' -\if :IS_DISTRIBUTED -\echo 'Running distributed hypertable tests' -Running distributed hypertable tests -\else -\echo 'Running local hypertable tests' -\endif -SET ROLE :ROLE_DEFAULT_PERM_USER; ---DDL commands on continuous aggregates -CREATE TABLE conditions ( - timec TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature integer NULL, - humidity DOUBLE PRECISION NULL, - timemeasure TIMESTAMPTZ, - timeinterval INTERVAL -); -\if :IS_DISTRIBUTED -SELECT table_name FROM create_distributed_hypertable('conditions', 'timec', replication_factor => 2); - table_name ------------- - conditions -(1 row) - -\else -SELECT table_name FROM create_hypertable('conditions', 'timec'); -\endif --- schema tests -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; -CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; -CREATE SCHEMA rename_schema; -GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; -SET ROLE :ROLE_DEFAULT_PERM_USER; -CREATE TABLE foo(time TIMESTAMPTZ NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('foo', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (2,public,foo,t) -(1 row) - -\else -SELECT create_hypertable('foo', 'time'); -\endif -CREATE MATERIALIZED VIEW rename_test - WITH ( timescaledb.continuous, timescaledb.materialized_only=true) -AS SELECT time_bucket('1week', time), COUNT(data) - FROM foo - GROUP BY 1 WITH NO DATA; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | 
partial_view_schema | partial_view_name -------------------+----------------+-----------------------+------------------- - public | rename_test | _timescaledb_internal | _partial_view_3 -(1 row) - -ALTER MATERIALIZED VIEW rename_test SET SCHEMA rename_schema; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+-----------------------+------------------- - rename_schema | rename_test | _timescaledb_internal | _partial_view_3 -(1 row) - -SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID", - h.schema_name AS "MAT_SCHEMA_NAME", - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema as "PART_VIEW_SCHEMA", - direct_view_name as "DIR_VIEW_NAME", - direct_view_schema as "DIR_VIEW_SCHEMA" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'rename_test' -\gset -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME" SET SCHEMA public; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+---------------------+------------------- - rename_schema | rename_test | public | _partial_view_3 -(1 row) - ---alter direct view schema -SELECT user_view_schema, user_view_name, direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | direct_view_schema | direct_view_name -------------------+----------------+-----------------------+------------------ - rename_schema | rename_test | _timescaledb_internal | _direct_view_3 -(1 row) - -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME" SET SCHEMA public; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name -------------------+----------------+---------------------+-------------------+--------------------+------------------ - rename_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 -(1 row) - -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER SCHEMA rename_schema RENAME TO new_name_schema; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name -------------------+----------------+---------------------+-------------------+--------------------+------------------ - new_name_schema | rename_test | public | _partial_view_3 | public | _direct_view_3 -(1 row) - -ALTER VIEW :"PART_VIEW_NAME" SET SCHEMA new_name_schema; -ALTER VIEW :"DIR_VIEW_NAME" SET SCHEMA new_name_schema; -SELECT 
user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name -------------------+----------------+---------------------+-------------------+--------------------+------------------ - new_name_schema | rename_test | new_name_schema | _partial_view_3 | new_name_schema | _direct_view_3 -(1 row) - -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER SCHEMA new_name_schema RENAME TO foo_name_schema; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+---------------------+------------------- - foo_name_schema | rename_test | foo_name_schema | _partial_view_3 -(1 row) - -ALTER MATERIALIZED VIEW foo_name_schema.rename_test SET SCHEMA public; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+---------------------+------------------- - public | rename_test | foo_name_schema | _partial_view_3 -(1 row) - -RESET ROLE; -SELECT current_user; - current_user --------------------- - cluster_super_user -(1 row) - -ALTER SCHEMA foo_name_schema RENAME TO rename_schema; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SET client_min_messages TO NOTICE; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+----------------+---------------------+------------------- - public | rename_test | rename_schema | _partial_view_3 -(1 row) - -ALTER MATERIALIZED VIEW rename_test RENAME TO rename_c_aggregate; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name -------------------+--------------------+---------------------+------------------- - public | rename_c_aggregate | rename_schema | _partial_view_3 -(1 row) - -SELECT * FROM rename_c_aggregate; - time_bucket | count --------------+------- -(0 rows) - -ALTER VIEW rename_schema.:"PART_VIEW_NAME" RENAME TO partial_view; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name -------------------+--------------------+---------------------+-------------------+--------------------+------------------ - public | rename_c_aggregate | rename_schema | partial_view | rename_schema | _direct_view_3 -(1 row) - ---rename direct view -ALTER VIEW rename_schema.:"DIR_VIEW_NAME" RENAME TO direct_view; -SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name, - direct_view_schema, direct_view_name - FROM _timescaledb_catalog.continuous_agg; - user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name 
-------------------+--------------------+---------------------+-------------------+--------------------+------------------ - public | rename_c_aggregate | rename_schema | partial_view | rename_schema | direct_view -(1 row) - --- drop_chunks tests -DROP TABLE conditions CASCADE; -DROP TABLE foo CASCADE; -psql:include/cagg_ddl_common.sql:161: NOTICE: drop cascades to 2 other objects -CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT hypertable_id AS drop_chunks_table_id - FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset -\else -SELECT hypertable_id AS drop_chunks_table_id - FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset -\endif -CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; -$DIST$); -\endif -SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test'); - set_integer_now_func ----------------------- - -(1 row) - -CREATE MATERIALIZED VIEW drop_chunks_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('5', time), COUNT(data) - FROM drop_chunks_table - GROUP BY 1 WITH NO DATA; -SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table, - schema_name AS drop_chunks_mat_schema, - table_name AS drop_chunks_mat_table_name - FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg - WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id - AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset --- create 3 chunks, with 3 time bucket -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; --- Only refresh up to bucket 15 initially. Matches the old refresh --- behavior that didn't materialize everything -CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); -SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; - count -------- - 3 -(1 row) - -SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; - count -------- - 1 -(1 row) - -SELECT * FROM drop_chunks_view ORDER BY 1; - time_bucket | count --------------+------- - 0 | 5 - 5 | 5 - 10 | 5 -(3 rows) - --- cannot drop directly from the materialization table without specifying --- cont. 
aggregate view name explicitly -\set ON_ERROR_STOP 0 -SELECT drop_chunks(:'drop_chunks_mat_table', - newer_than => -20, - verbose => true); -psql:include/cagg_ddl_common.sql:213: ERROR: operation not supported on materialized hypertable -\set ON_ERROR_STOP 1 -SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; - count -------- - 3 -(1 row) - -SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; - count -------- - 1 -(1 row) - -SELECT * FROM drop_chunks_view ORDER BY 1; - time_bucket | count --------------+------- - 0 | 5 - 5 | 5 - 10 | 5 -(3 rows) - --- drop chunks when the chunksize and time_bucket aren't aligned -DROP TABLE drop_chunks_table CASCADE; -psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to 2 other objects -psql:include/cagg_ddl_common.sql:222: NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk -CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT hypertable_id AS drop_chunks_table_u_id - FROM create_distributed_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7, replication_factor => 2) \gset -\else -SELECT hypertable_id AS drop_chunks_table_u_id - FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset -\endif -CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$; -$DIST$); -\endif -SELECT set_integer_now_func('drop_chunks_table_u', 'integer_now_test1'); - set_integer_now_func ----------------------- - -(1 row) - -CREATE MATERIALIZED VIEW drop_chunks_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('3', time), COUNT(data) - FROM drop_chunks_table_u - GROUP BY 1 WITH NO DATA; -SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table_u, - schema_name AS drop_chunks_mat_schema, - table_name AS drop_chunks_mat_table_u_name - FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg - WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id - AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset --- create 3 chunks, with 3 time bucket -INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; --- Refresh up to bucket 15 to match old materializer behavior -CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15); -SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; - count -------- - 4 -(1 row) - -SELECT count(c) FROM show_chunks('drop_chunks_view') AS c; - count -------- - 1 -(1 row) - -SELECT * FROM drop_chunks_view ORDER BY 1; - time_bucket | count --------------+------- - 0 | 3 - 3 | 3 - 6 | 3 - 9 | 3 - 12 | 3 -(5 rows) - --- TRUNCATE test --- Can truncate regular hypertables that have caggs -TRUNCATE drop_chunks_table_u; -\set ON_ERROR_STOP 0 --- Can't truncate materialized hypertables directly -TRUNCATE :drop_chunks_mat_table_u; -psql:include/cagg_ddl_common.sql:271: ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate -\set ON_ERROR_STOP 1 --- Check that we don't interfere with TRUNCATE of normal table and --- partitioned table -CREATE TABLE truncate (value int); -INSERT INTO truncate VALUES (1), (2); -TRUNCATE truncate; 
-SELECT * FROM truncate; - value -------- -(0 rows) - -CREATE TABLE truncate_partitioned (value int) - PARTITION BY RANGE(value); -CREATE TABLE truncate_p1 PARTITION OF truncate_partitioned - FOR VALUES FROM (1) TO (3); -INSERT INTO truncate_partitioned VALUES (1), (2); -TRUNCATE truncate_partitioned; -SELECT * FROM truncate_partitioned; - value -------- -(0 rows) - --- ALTER TABLE tests -\set ON_ERROR_STOP 0 --- test a variety of ALTER TABLE statements -ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name; -psql:include/cagg_ddl_common.sql:291: ERROR: renaming columns on materialization tables is not supported -ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket); -psql:include/cagg_ddl_common.sql:292: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED; -psql:include/cagg_ddl_common.sql:293: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY; -psql:include/cagg_ddl_common.sql:294: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER; -psql:include/cagg_ddl_common.sql:295: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket; -psql:include/cagg_ddl_common.sql:296: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL; -psql:include/cagg_ddl_common.sql:297: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1; -psql:include/cagg_ddl_common.sql:298: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL; -psql:include/cagg_ddl_common.sql:299: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL; -psql:include/cagg_ddl_common.sql:300: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo; -psql:include/cagg_ddl_common.sql:301: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u NOT OF; -psql:include/cagg_ddl_common.sql:302: ERROR: operation not supported on materialization tables -ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER; -psql:include/cagg_ddl_common.sql:303: ERROR: operation not supported on materialization tables -\set ON_ERROR_STOP 1 -ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public; -ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name; -SET ROLE :ROLE_DEFAULT_PERM_USER; -SET client_min_messages TO NOTICE; -SELECT * FROM new_name; - time_bucket | count --------------+------- - 0 | 3 - 3 | 3 - 6 | 3 - 9 | 3 - 12 | 3 -(5 rows) - -SELECT * FROM drop_chunks_view ORDER BY 1; - time_bucket | count --------------+------- - 0 | 3 - 3 | 3 - 6 | 3 - 9 | 3 - 12 | 3 -(5 rows) - -\set ON_ERROR_STOP 0 --- no continuous aggregates on a continuous aggregate materialization table -CREATE MATERIALIZED VIEW new_name_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('6', time_bucket), COUNT("count") - FROM new_name - GROUP BY 1 WITH NO DATA; -psql:include/cagg_ddl_common.sql:326: ERROR: hypertable is a continuous aggregate materialization table -\set ON_ERROR_STOP 1 -CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, 
v2 float); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('metrics', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (8,public,metrics,t) -(1 row) - -\else -SELECT create_hypertable('metrics','time'); -\endif -INSERT INTO metrics SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75; --- check expressions in view definition -CREATE MATERIALIZED VIEW cagg_expr - WITH (timescaledb.continuous, timescaledb.materialized_only=true) -AS -SELECT - time_bucket('1d', time) AS time, - 'Const'::text AS Const, - 4.3::numeric AS "numeric", - first(metrics,time), - CASE WHEN true THEN 'foo' ELSE 'bar' END, - COALESCE(NULL,'coalesce'), - avg(v1) + avg(v2) AS avg1, - avg(v1+v2) AS avg2 -FROM metrics -GROUP BY 1 WITH NO DATA; -CALL refresh_continuous_aggregate('cagg_expr', NULL, NULL); -SELECT * FROM cagg_expr ORDER BY time LIMIT 5; - time | const | numeric | first | case | coalesce | avg1 | avg2 -------------------------------+-------+---------+----------------------------------------------+------+----------+------+------ - Fri Dec 31 16:00:00 1999 PST | Const | 4.3 | ("Sat Jan 01 00:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 - Sat Jan 01 16:00:00 2000 PST | Const | 4.3 | ("Sat Jan 01 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 - Sun Jan 02 16:00:00 2000 PST | Const | 4.3 | ("Sun Jan 02 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 - Mon Jan 03 16:00:00 2000 PST | Const | 4.3 | ("Mon Jan 03 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 - Tue Jan 04 16:00:00 2000 PST | Const | 4.3 | ("Tue Jan 04 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 -(5 rows) - ---test materialization of invalidation before drop -DROP TABLE IF EXISTS drop_chunks_table CASCADE; -psql:include/cagg_ddl_common.sql:358: NOTICE: table "drop_chunks_table" does not exist, skipping -DROP TABLE IF EXISTS drop_chunks_table_u CASCADE; -psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to 2 other objects -psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk -CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT hypertable_id AS drop_chunks_table_nid - FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset -\else -SELECT hypertable_id AS drop_chunks_table_nid - FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset -\endif -CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$; -$DIST$); -\endif -SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test2'); - set_integer_now_func ----------------------- - -(1 row) - -CREATE MATERIALIZED VIEW drop_chunks_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('5', time), max(data) - FROM drop_chunks_table - GROUP BY 1 WITH NO DATA; -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; ---dropping chunks will process the invalidations -SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); - drop_chunks 
------------------------------------------------ - _timescaledb_internal._dist_hyper_10_13_chunk -(1 row) - -SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; - time | data -------+------ - 10 | 10 -(1 row) - -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(20, 35) AS i; -CALL refresh_continuous_aggregate('drop_chunks_view', 10, 40); ---this will be seen after the drop its within the invalidation window and will be dropped -INSERT INTO drop_chunks_table VALUES (26, 100); ---this will not be processed by the drop since chunk 30-39 is not dropped but will be seen after refresh ---shows that the drop doesn't do more work than necessary -INSERT INTO drop_chunks_table VALUES (31, 200); ---move the time up to 39 -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(35, 39) AS i; ---the chunks and ranges we have thus far -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table'; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_14_chunk | 10 | 20 - _dist_hyper_10_15_chunk | 20 | 30 - _dist_hyper_10_16_chunk | 30 | 40 -(3 rows) - ---the invalidation on 25 not yet seen -SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; - time_bucket | max --------------+----- - 35 | 35 - 30 | 34 - 25 | 29 - 20 | 24 - 15 | 19 - 10 | 14 -(6 rows) - ---refresh to process the invalidations and then drop -CALL refresh_continuous_aggregate('drop_chunks_view', NULL, (integer_now_test2()-9)); -SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9)); - drop_chunks ------------------------------------------------ - _timescaledb_internal._dist_hyper_10_14_chunk - _timescaledb_internal._dist_hyper_10_15_chunk -(2 rows) - ---new values on 25 now seen in view -SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; - time_bucket | max --------------+----- - 35 | 35 - 30 | 34 - 25 | 100 - 20 | 24 - 15 | 19 - 10 | 14 -(6 rows) - ---earliest datapoint now in table -SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1; - time | data -------+------ - 30 | 30 -(1 row) - ---we see the chunks row with the dropped flags set; -SELECT * FROM _timescaledb_catalog.chunk where dropped; - id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk -----+---------------+-----------------------+-------------------------+---------------------+---------+--------+----------- - 13 | 10 | _timescaledb_internal | _dist_hyper_10_13_chunk | | t | 0 | f - 14 | 10 | _timescaledb_internal | _dist_hyper_10_14_chunk | | t | 0 | f - 15 | 10 | _timescaledb_internal | _dist_hyper_10_15_chunk | | t | 0 | f -(3 rows) - ---still see data in the view -SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC; - time_bucket | max --------------+----- - 25 | 100 - 20 | 24 - 15 | 19 - 10 | 14 -(4 rows) - ---no data but covers dropped chunks -SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; - time | data -------+------ -(0 rows) - ---recreate the dropped chunk -INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i; ---see data from recreated region -SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC; - time | data -------+------ - 20 | 20 - 19 | 19 - 18 | 18 - 17 | 17 - 16 | 16 - 15 | 15 - 14 | 14 - 13 | 13 - 12 | 12 - 11 | 11 - 10 | 10 - 9 | 9 - 8 | 
8 - 7 | 7 - 6 | 6 - 5 | 5 - 4 | 4 - 3 | 3 - 2 | 2 - 1 | 1 - 0 | 0 -(21 rows) - ---should show chunk with old name and old ranges -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table' -ORDER BY range_start_integer; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_13_chunk | 0 | 10 - _dist_hyper_10_14_chunk | 10 | 20 - _dist_hyper_10_15_chunk | 20 | 30 - _dist_hyper_10_16_chunk | 30 | 40 -(4 rows) - ---We dropped everything up to the bucket starting at 30 and then ---inserted new data up to and including time 20. Therefore, the ---dropped data should stay the same as long as we only refresh ---buckets that have non-dropped data. -CALL refresh_continuous_aggregate('drop_chunks_view', 30, 40); -SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC; - time_bucket | max --------------+----- - 35 | 39 - 30 | 200 - 25 | 100 - 20 | 24 - 15 | 19 - 10 | 14 -(6 rows) - -SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen, - schema_name AS drop_chunks_mat_schema, - table_name AS drop_chunks_mat_table_name - FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg - WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid - AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset --- TEST drop chunks from continuous aggregates by specifying view name -SELECT drop_chunks('drop_chunks_view', - newer_than => -20, - verbose => true); -psql:include/cagg_ddl_common.sql:454: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk - drop_chunks ------------------------------------------- - _timescaledb_internal._hyper_11_17_chunk -(1 row) - --- Test that we cannot drop chunks when specifying materialized --- hypertable -INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500; -CALL refresh_continuous_aggregate('drop_chunks_view', 45, 55); -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer; - chunk_name | range_start_integer | range_end_integer ---------------------+---------------------+------------------- - _hyper_11_20_chunk | 0 | 100 -(1 row) - -\set ON_ERROR_STOP 0 -\set VERBOSITY default -SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60); -psql:include/cagg_ddl_common.sql:466: ERROR: operation not supported on materialized hypertable -DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable. -HINT: Try the operation on the continuous aggregate instead. -\set VERBOSITY terse -\set ON_ERROR_STOP 1 ------------------------------------------------------------------ --- Test that refresh_continuous_aggregate on chunk will refresh, --- but only in the regions covered by the show chunks. 
------------------------------------------------------------------ -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table' -ORDER BY 2,3; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_13_chunk | 0 | 10 - _dist_hyper_10_14_chunk | 10 | 20 - _dist_hyper_10_15_chunk | 20 | 30 - _dist_hyper_10_16_chunk | 30 | 40 - _dist_hyper_10_18_chunk | 40 | 50 - _dist_hyper_10_19_chunk | 50 | 60 -(6 rows) - --- Pick the second chunk as the one to drop -WITH numbered_chunks AS ( - SELECT row_number() OVER (ORDER BY range_start_integer), chunk_schema, chunk_name, range_start_integer, range_end_integer - FROM timescaledb_information.chunks - WHERE hypertable_name = 'drop_chunks_table' - ORDER BY 1 -) -SELECT format('%I.%I', chunk_schema, chunk_name) AS chunk_to_drop, range_start_integer, range_end_integer -FROM numbered_chunks -WHERE row_number = 2 \gset --- There's data in the table for the chunk/range we will drop -SELECT * FROM drop_chunks_table -WHERE time >= :range_start_integer -AND time < :range_end_integer -ORDER BY 1; - time | data -------+------ - 10 | 10 - 11 | 11 - 12 | 12 - 13 | 13 - 14 | 14 - 15 | 15 - 16 | 16 - 17 | 17 - 18 | 18 - 19 | 19 -(10 rows) - --- Make sure there is also data in the continuous aggregate --- CARE: --- Note that this behaviour of dropping the materialization table chunks and expecting a refresh --- that overlaps that time range to NOT update those chunks is undefined. Since CAGGs over --- distributed hypertables merge the invalidations the refresh region is updated in the distributed --- case, which may be different than what happens in the normal hypertable case. 
The command was: --- SELECT drop_chunks('drop_chunks_view', newer_than => -20, verbose => true); -CALL refresh_continuous_aggregate('drop_chunks_view', 0, 50); -SELECT * FROM drop_chunks_view -ORDER BY 1; - time_bucket | max --------------+----- - 0 | 4 - 5 | 9 - 10 | 14 - 15 | 19 - 20 | 20 - 30 | 200 - 35 | 39 - 45 | 500 - 50 | 500 -(9 rows) - --- Drop the second chunk, to leave a gap in the data -\if :IS_DISTRIBUTED -CALL distributed_exec(format('DROP TABLE IF EXISTS %s', :'chunk_to_drop')); -DROP FOREIGN TABLE :chunk_to_drop; -\else -DROP TABLE :chunk_to_drop; -\endif --- Verify that the second chunk is dropped -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table' -ORDER BY 2,3; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_13_chunk | 0 | 10 - _dist_hyper_10_15_chunk | 20 | 30 - _dist_hyper_10_16_chunk | 30 | 40 - _dist_hyper_10_18_chunk | 40 | 50 - _dist_hyper_10_19_chunk | 50 | 60 -(5 rows) - --- Data is no longer in the table but still in the view -SELECT * FROM drop_chunks_table -WHERE time >= :range_start_integer -AND time < :range_end_integer -ORDER BY 1; - time | data -------+------ -(0 rows) - -SELECT * FROM drop_chunks_view -WHERE time_bucket >= :range_start_integer -AND time_bucket < :range_end_integer -ORDER BY 1; - time_bucket | max --------------+----- - 10 | 14 - 15 | 19 -(2 rows) - --- Insert a large value in one of the chunks that will be dropped -INSERT INTO drop_chunks_table VALUES (:range_start_integer-1, 100); --- Now refresh and drop the two adjecent chunks -CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30); -SELECT drop_chunks('drop_chunks_table', older_than=>30); - drop_chunks ------------------------------------------------ - _timescaledb_internal._dist_hyper_10_13_chunk - _timescaledb_internal._dist_hyper_10_15_chunk -(2 rows) - --- Verify that the chunks are dropped -SELECT chunk_name, range_start_integer, range_end_integer -FROM timescaledb_information.chunks -WHERE hypertable_name = 'drop_chunks_table' -ORDER BY 2,3; - chunk_name | range_start_integer | range_end_integer --------------------------+---------------------+------------------- - _dist_hyper_10_16_chunk | 30 | 40 - _dist_hyper_10_18_chunk | 40 | 50 - _dist_hyper_10_19_chunk | 50 | 60 -(3 rows) - --- The continuous aggregate should be refreshed in the regions covered --- by the dropped chunks, but not in the "gap" region, i.e., the --- region of the chunk that was dropped via DROP TABLE. -SELECT * FROM drop_chunks_view -ORDER BY 1; - time_bucket | max --------------+----- - 0 | 4 - 5 | 100 - 20 | 20 - 30 | 200 - 35 | 39 - 45 | 500 - 50 | 500 -(7 rows) - --- Now refresh in the region of the first two dropped chunks -CALL refresh_continuous_aggregate('drop_chunks_view', 0, :range_end_integer); --- Aggregate data in the refreshed range should no longer exist since --- the underlying data was dropped. -SELECT * FROM drop_chunks_view -ORDER BY 1; - time_bucket | max --------------+----- - 20 | 20 - 30 | 200 - 35 | 39 - 45 | 500 - 50 | 500 -(5 rows) - --------------------------------------------------------------------- --- Check that we can create a materialized table in a tablespace. We --- create one with tablespace and one without and compare them. 
-CREATE VIEW cagg_info AS -WITH - caggs AS ( - SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view, - format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view, - format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view, - format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid - FROM _timescaledb_catalog.hypertable ht, - _timescaledb_catalog.continuous_agg cagg - WHERE ht.id = cagg.mat_hypertable_id - ) -SELECT user_view, - pg_get_userbyid(relowner) AS user_view_owner, - relname AS mat_table, - (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = mat_relid) AS mat_table_owner, - direct_view, - (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = direct_view) AS direct_view_owner, - partial_view, - (SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = partial_view) AS partial_view_owner, - (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace - FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid; -GRANT SELECT ON cagg_info TO PUBLIC; -CREATE VIEW chunk_info AS -SELECT ht.schema_name, ht.table_name, relname AS chunk_name, - (SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace - FROM pg_class c, - _timescaledb_catalog.hypertable ht, - _timescaledb_catalog.chunk ch - WHERE ch.table_name = c.relname AND ht.id = ch.hypertable_id; -CREATE TABLE whatever(time BIGINT NOT NULL, data INTEGER); -\if :IS_DISTRIBUTED -SELECT hypertable_id AS whatever_nid - FROM create_distributed_hypertable('whatever', 'time', chunk_time_interval => 10, replication_factor => 2) -\gset -\else -SELECT hypertable_id AS whatever_nid - FROM create_hypertable('whatever', 'time', chunk_time_interval => 10) -\gset -\endif -SELECT set_integer_now_func('whatever', 'integer_now_test'); - set_integer_now_func ----------------------- - -(1 row) - -CREATE MATERIALIZED VIEW whatever_view_1 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT time_bucket('5', time), COUNT(data) - FROM whatever GROUP BY 1 WITH NO DATA; -CREATE MATERIALIZED VIEW whatever_view_2 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) -TABLESPACE tablespace1 AS -SELECT time_bucket('5', time), COUNT(data) - FROM whatever GROUP BY 1 WITH NO DATA; -INSERT INTO whatever SELECT i, i FROM generate_series(0, 29) AS i; -CALL refresh_continuous_aggregate('whatever_view_1', NULL, NULL); -CALL refresh_continuous_aggregate('whatever_view_2', NULL, NULL); -SELECT user_view, - mat_table, - cagg_info.tablespace AS mat_tablespace, - chunk_name, - chunk_info.tablespace AS chunk_tablespace - FROM cagg_info, chunk_info - WHERE mat_table::text = table_name - AND user_view::text LIKE 'whatever_view%'; - user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace ------------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk | - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 -(2 rows) - -ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2; -SELECT user_view, - mat_table, - cagg_info.tablespace AS mat_tablespace, - chunk_name, - chunk_info.tablespace AS chunk_tablespace - FROM cagg_info, chunk_info - WHERE mat_table::text = table_name - AND user_view::text LIKE 'whatever_view%'; - user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace 
------------------+-----------------------------+----------------+--------------------+------------------ - whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2 - whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1 -(2 rows) - -DROP MATERIALIZED VIEW whatever_view_1; -psql:include/cagg_ddl_common.sql:644: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk -DROP MATERIALIZED VIEW whatever_view_2; -psql:include/cagg_ddl_common.sql:645: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk --- test bucket width expressions on integer hypertables -CREATE TABLE metrics_int2 ( - time int2 NOT NULL, - device_id int, - v1 float, - v2 float -); -CREATE TABLE metrics_int4 ( - time int4 NOT NULL, - device_id int, - v1 float, - v2 float -); -CREATE TABLE metrics_int8 ( - time int8 NOT NULL, - device_id int, - v1 float, - v2 float -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10, replication_factor => 2) -FROM ( - VALUES ('int2'), - ('int4'), - ('int8')) v (dt); - create_distributed_hypertable -------------------------------- - (15,public,metrics_int2,t) - (16,public,metrics_int4,t) - (17,public,metrics_int8,t) -(3 rows) - -\else -SELECT create_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10) -FROM ( - VALUES ('int2'), - ('int4'), - ('int8')) v (dt); -\endif -CREATE OR REPLACE FUNCTION int2_now () - RETURNS int2 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int2 -$$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION int2_now () - RETURNS int2 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int2 -$$; -$DIST$); -\endif -CREATE OR REPLACE FUNCTION int4_now () - RETURNS int4 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int4 -$$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION int4_now () - RETURNS int4 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int4 -$$; -$DIST$); -\endif -CREATE OR REPLACE FUNCTION int8_now () - RETURNS int8 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int8 -$$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ -CREATE OR REPLACE FUNCTION int8_now () - RETURNS int8 - LANGUAGE SQL - STABLE - AS $$ - SELECT 10::int8 -$$; -$DIST$); -\endif -SELECT set_integer_now_func (('metrics_' || dt)::regclass, (dt || '_now')::regproc) -FROM ( - VALUES ('int2'), - ('int4'), - ('int8')) v (dt); - set_integer_now_func ----------------------- - - - -(3 rows) - --- width expression for int2 hypertables -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1::smallint, time) -FROM metrics_int2 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:750: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1::smallint + 2::smallint, time) -FROM metrics_int2 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:757: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; --- width expression for int4 hypertables -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1, time) -FROM metrics_int4 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:765: NOTICE: continuous aggregate "width_expr" 
is already up-to-date -DROP MATERIALIZED VIEW width_expr; -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1 + 2, time) -FROM metrics_int4 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:772: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; --- width expression for int8 hypertables -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1, time) -FROM metrics_int8 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:780: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1 + 2, time) -FROM metrics_int8 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:787: NOTICE: continuous aggregate "width_expr" is already up-to-date -DROP MATERIALIZED VIEW width_expr; -\set ON_ERROR_STOP 0 --- non-immutable expresions should be rejected -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(extract(year FROM now())::smallint, time) -FROM metrics_int2 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:796: ERROR: only immutable expressions allowed in time bucket function -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(extract(year FROM now())::int, time) -FROM metrics_int4 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function -CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(extract(year FROM now())::int, time) -FROM metrics_int8 -GROUP BY 1; -psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function -\set ON_ERROR_STOP 1 --- Test various ALTER MATERIALIZED VIEW statements. -SET ROLE :ROLE_DEFAULT_PERM_USER; -CREATE MATERIALIZED VIEW owner_check WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(1 + 2, time) -FROM metrics_int8 -GROUP BY 1 -WITH NO DATA; -\x on -SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; --[ RECORD 1 ]------+--------------------------------------- -user_view | owner_check -user_view_owner | default_perm_user -mat_table | _materialized_hypertable_24 -mat_table_owner | default_perm_user -direct_view | _timescaledb_internal._direct_view_24 -direct_view_owner | default_perm_user -partial_view | _timescaledb_internal._partial_view_24 -partial_view_owner | default_perm_user -tablespace | - -\x off --- This should not work since the target user has the wrong role, but --- we test that the normal checks are done when changing the owner. 
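-- A minimal sketch of the ownership rule tested below (role names other than
-- default_perm_user are illustrative): without superuser rights, the user
-- issuing ALTER ... OWNER TO must be allowed to assume the target role, for
-- example via an explicit role grant; otherwise the command is rejected as
-- shown next.
GRANT target_owner TO default_perm_user;
ALTER MATERIALIZED VIEW owner_check OWNER TO target_owner;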
-\set ON_ERROR_STOP 0 -ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; -psql:include/cagg_ddl_common.sql:826: ERROR: must be member of role "test_role_1" -\set ON_ERROR_STOP 1 --- Superuser can always change owner -SET ROLE :ROLE_CLUSTER_SUPERUSER; -ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; -\x on -SELECT * FROM cagg_info WHERE user_view::text = 'owner_check'; --[ RECORD 1 ]------+--------------------------------------- -user_view | owner_check -user_view_owner | test_role_1 -mat_table | _materialized_hypertable_24 -mat_table_owner | test_role_1 -direct_view | _timescaledb_internal._direct_view_24 -direct_view_owner | test_role_1 -partial_view | _timescaledb_internal._partial_view_24 -partial_view_owner | test_role_1 -tablespace | - -\x off --- --- Test drop continuous aggregate cases --- --- Issue: #2608 --- -CREATE OR REPLACE FUNCTION test_int_now() - RETURNS INT LANGUAGE SQL STABLE AS -$BODY$ - SELECT 50; -$BODY$; -\if :IS_DISTRIBUTED -CALL distributed_exec($DIST$ - CREATE OR REPLACE FUNCTION test_int_now() - RETURNS INT LANGUAGE SQL STABLE AS - $BODY$ - SELECT 50; - $BODY$; -$DIST$); -\endif -CREATE TABLE conditionsnm(time_int INT NOT NULL, device INT, value FLOAT); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10, replication_factor => 2); - create_distributed_hypertable -------------------------------- - (25,public,conditionsnm,t) -(1 row) - -\else -SELECT create_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10); -\endif -SELECT set_integer_now_func('conditionsnm', 'test_int_now'); - set_integer_now_func ----------------------- - -(1 row) - -INSERT INTO conditionsnm -SELECT time_val, time_val % 4, 3.14 FROM generate_series(0,100,1) AS time_val; --- Case 1: DROP -CREATE MATERIALIZED VIEW conditionsnm_4 -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) -AS -SELECT time_bucket(7, time_int) as bucket, -SUM(value), COUNT(value) -FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:874: NOTICE: refreshing continuous aggregate "conditionsnm_4" -DROP materialized view conditionsnm_4; -psql:include/cagg_ddl_common.sql:876: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk --- Case 2: DROP CASCADE should have similar behaviour as DROP -CREATE MATERIALIZED VIEW conditionsnm_4 -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) -AS -SELECT time_bucket(7, time_int) as bucket, -SUM(value), COUNT(value) -FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:884: NOTICE: refreshing continuous aggregate "conditionsnm_4" -DROP materialized view conditionsnm_4 CASCADE; -psql:include/cagg_ddl_common.sql:886: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk --- Case 3: require CASCADE in case of dependent object -CREATE MATERIALIZED VIEW conditionsnm_4 -WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) -AS -SELECT time_bucket(7, time_int) as bucket, -SUM(value), COUNT(value) -FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:894: NOTICE: refreshing continuous aggregate "conditionsnm_4" -CREATE VIEW see_cagg as select * from conditionsnm_4; -\set ON_ERROR_STOP 0 -DROP MATERIALIZED VIEW conditionsnm_4; -psql:include/cagg_ddl_common.sql:898: ERROR: cannot drop view conditionsnm_4 because other objects depend on it -\set ON_ERROR_STOP 1 --- Case 4: DROP CASCADE with dependency -DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; 
-psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to view see_cagg -psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk --- Test DROP SCHEMA CASCADE with continuous aggregates --- --- Issue: #2350 --- --- Case 1: DROP SCHEMA CASCADE -CREATE SCHEMA test_schema; -CREATE TABLE test_schema.telemetry_raw ( - ts TIMESTAMP WITH TIME ZONE NOT NULL, - value DOUBLE PRECISION -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); - create_distributed_hypertable ----------------------------------- - (29,test_schema,telemetry_raw,t) -(1 row) - -\else -SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); -\endif -CREATE MATERIALIZED VIEW test_schema.telemetry_1s - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS -SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, - avg(value) - FROM test_schema.telemetry_raw - GROUP BY ts_1s WITH NO DATA; -SELECT ca.raw_hypertable_id, - h.schema_name, - h.table_name AS "MAT_TABLE_NAME", - partial_view_name as "PART_VIEW_NAME", - partial_view_schema -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'telemetry_1s'; - raw_hypertable_id | schema_name | MAT_TABLE_NAME | PART_VIEW_NAME | partial_view_schema --------------------+-----------------------+-----------------------------+------------------+----------------------- - 29 | _timescaledb_internal | _materialized_hypertable_30 | _partial_view_30 | _timescaledb_internal -(1 row) - -\gset -DROP SCHEMA test_schema CASCADE; -psql:include/cagg_ddl_common.sql:941: NOTICE: drop cascades to 4 other objects -SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = 'telemetry_1s'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; - count -------- - 0 -(1 row) - --- Case 2: DROP SCHEMA CASCADE with multiple caggs -CREATE SCHEMA test_schema; -CREATE TABLE test_schema.telemetry_raw ( - ts TIMESTAMP WITH TIME ZONE NOT NULL, - value DOUBLE PRECISION -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); - create_distributed_hypertable ----------------------------------- - (31,test_schema,telemetry_raw,t) -(1 row) - -\else -SELECT create_hypertable('test_schema.telemetry_raw', 'ts'); -\endif -CREATE MATERIALIZED VIEW test_schema.cagg1 - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS -SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, - avg(value) - FROM test_schema.telemetry_raw - GROUP BY ts_1s WITH NO DATA; -CREATE MATERIALIZED VIEW test_schema.cagg2 - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS -SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s, - avg(value) - FROM test_schema.telemetry_raw - GROUP BY ts_1s WITH NO DATA; -SELECT ca.raw_hypertable_id, - h.schema_name, - h.table_name AS "MAT_TABLE_NAME1", - partial_view_name as "PART_VIEW_NAME1", - partial_view_schema -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'cagg1'; - raw_hypertable_id | schema_name | MAT_TABLE_NAME1 | PART_VIEW_NAME1 | partial_view_schema 
--------------------+-----------------------+-----------------------------+------------------+----------------------- - 31 | _timescaledb_internal | _materialized_hypertable_32 | _partial_view_32 | _timescaledb_internal -(1 row) - -\gset -SELECT ca.raw_hypertable_id, - h.schema_name, - h.table_name AS "MAT_TABLE_NAME2", - partial_view_name as "PART_VIEW_NAME2", - partial_view_schema -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'cagg2'; - raw_hypertable_id | schema_name | MAT_TABLE_NAME2 | PART_VIEW_NAME2 | partial_view_schema --------------------+-----------------------+-----------------------------+------------------+----------------------- - 31 | _timescaledb_internal | _materialized_hypertable_33 | _partial_view_33 | _timescaledb_internal -(1 row) - -\gset -DROP SCHEMA test_schema CASCADE; -psql:include/cagg_ddl_common.sql:998: NOTICE: drop cascades to 7 other objects -SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME1'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = 'cagg1'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME2'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME2'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_class WHERE relname = 'cagg2'; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema'; - count -------- - 0 -(1 row) - -DROP TABLESPACE tablespace1; -DROP TABLESPACE tablespace2; --- Check that we can rename a column of a materialized view and still --- rebuild it after (#3051, #3405) -CREATE TABLE conditions ( - time TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (34,public,conditions,t) -(1 row) - -\else -SELECT create_hypertable('conditions', 'time'); -\endif -INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); -INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); -INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); -INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); -INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); -INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); -INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); -INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); -INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); -INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); -INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); -CREATE MATERIALIZED VIEW conditions_daily -WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS -SELECT location, - time_bucket(INTERVAL '1 day', time) AS bucket, - AVG(temperature) - FROM conditions -GROUP BY location, bucket -WITH NO DATA; -SELECT format('%I.%I', '_timescaledb_internal', h.table_name) AS "MAT_TABLE_NAME", - format('%I.%I', '_timescaledb_internal', partial_view_name) AS "PART_VIEW_NAME", - format('%I.%I', '_timescaledb_internal', direct_view_name) AS "DIRECT_VIEW_NAME" -FROM 
_timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'conditions_daily' -\gset --- Show both the columns and the view definitions to see that --- references are correct in the view as well. -SELECT * FROM test.show_columns('conditions_daily'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - bucket | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - bucket | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - bucket | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - bucket | timestamp with time zone | t - avg | double precision | f -(3 rows) - -ALTER MATERIALIZED VIEW conditions_daily RENAME COLUMN bucket to "time"; --- Show both the columns and the view definitions to see that --- references are correct in the view as well. -SELECT * FROM test.show_columns(' conditions_daily'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - time | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - time | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'PART_VIEW_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - time | timestamp with time zone | f - avg | double precision | f -(3 rows) - -SELECT * FROM test.show_columns(:'MAT_TABLE_NAME'); - Column | Type | NotNull -----------+--------------------------+--------- - location | text | f - time | timestamp with time zone | t - avg | double precision | f -(3 rows) - --- This will rebuild the materialized view and should succeed. -ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only = false); --- Refresh the continuous aggregate to check that it works after the --- rename. 
-\set VERBOSITY verbose -CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); -\set VERBOSITY terse --- --- Indexes on continuous aggregate --- -\set ON_ERROR_STOP 0 --- unique indexes are not supported -CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location); -psql:include/cagg_ddl_common.sql:1084: ERROR: continuous aggregates do not support UNIQUE indexes --- concurrently index creation not supported -CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg); -psql:include/cagg_ddl_common.sql:1086: ERROR: hypertables do not support concurrent index creation -\set ON_ERROR_STOP 1 -CREATE INDEX index_avg ON conditions_daily (avg); -CREATE INDEX index_avg_only ON ONLY conditions_daily (avg); -CREATE INDEX index_avg_include ON conditions_daily (avg) INCLUDE (location); -CREATE INDEX index_avg_expr ON conditions_daily ((avg + 1)); -CREATE INDEX index_avg_location_sfo ON conditions_daily (avg) WHERE location = 'SFO'; -CREATE INDEX index_avg_expr_location_sfo ON conditions_daily ((avg + 2)) WHERE location = 'SFO'; -SELECT * FROM test.show_indexespred(:'MAT_TABLE_NAME'); - Index | Columns | Expr | Pred | Unique | Primary | Exclusion | Tablespace ------------------------------------------------------------------------+-------------------+---------------------------+------------------------+--------+---------+-----------+------------ - _timescaledb_internal._materialized_hypertable_35_bucket_idx | {bucket} | | | f | f | f | - _timescaledb_internal._materialized_hypertable_35_location_bucket_idx | {location,bucket} | | | f | f | f | - _timescaledb_internal.index_avg | {avg} | | | f | f | f | - _timescaledb_internal.index_avg_expr | {expr} | avg + 1::double precision | | f | f | f | - _timescaledb_internal.index_avg_expr_location_sfo | {expr} | avg + 2::double precision | location = 'SFO'::text | f | f | f | - _timescaledb_internal.index_avg_include | {avg,location} | | | f | f | f | - _timescaledb_internal.index_avg_location_sfo | {avg} | | location = 'SFO'::text | f | f | f | - _timescaledb_internal.index_avg_only | {avg} | | | f | f | f | -(8 rows) - --- #3696 assertion failure when referencing columns not present in result -CREATE TABLE i3696(time timestamptz NOT NULL, search_query text, cnt integer, cnt2 integer); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('i3696', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (36,public,i3696,t) -(1 row) - -\else -SELECT table_name FROM create_hypertable('i3696','time'); -\endif -CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.materialized_only=false) -AS - SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket - FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query; -psql:include/cagg_ddl_common.sql:1108: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date -ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true'); -CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false) -AS - SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket - FROM i3696 GROUP BY cnt + cnt2, bucket, search_query - HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; -psql:include/cagg_ddl_common.sql:1116: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date -ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 
'true'); ---TEST test with multiple settings on continuous aggregates -- --- test for materialized_only + compress combinations (real time aggs enabled initially) -CREATE TABLE test_setting(time timestamptz not null, val numeric); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('test_setting', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (39,public,test_setting,t) -(1 row) - -\else -SELECT create_hypertable('test_setting', 'time'); -\endif -CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false) -AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; -psql:include/cagg_ddl_common.sql:1130: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date -INSERT INTO test_setting -SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0; -CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - ---this row is not in the materialized result --- -INSERT INTO test_setting VALUES( '2020-11-01', 20); ---try out 2 settings here -- -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1141: NOTICE: defaulting compress_orderby to time_bucket -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | t | t -(1 row) - ---real time aggs is off now , should return 20 -- -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - ---now set it back to false -- -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1149: NOTICE: defaulting compress_orderby to time_bucket -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | t | f -(1 row) - ---count should return additional data since we have real time aggs on -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 21 -(1 row) - -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | f | t -(1 row) - ---real time aggs is off now , should return 20 -- -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only 
--------------------+---------------------+------------------- - test_setting_cagg | f | f -(1 row) - ---count should return additional data since we have real time aggs on -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 21 -(1 row) - -DELETE FROM test_setting WHERE val = 20; ---TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- --- test for materialized_only + compress combinations (real time aggs enabled initially) -DROP MATERIALIZED VIEW test_setting_cagg; -psql:include/cagg_ddl_common.sql:1174: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk -CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) -AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; -psql:include/cagg_ddl_common.sql:1177: NOTICE: refreshing continuous aggregate "test_setting_cagg" -CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - ---this row is not in the materialized result --- -INSERT INTO test_setting VALUES( '2020-11-01', 20); ---try out 2 settings here -- -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1185: NOTICE: defaulting compress_orderby to time_bucket -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | t | f -(1 row) - ---count should return additional data since we have real time aggs on -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 21 -(1 row) - ---now set it back to false -- -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); -psql:include/cagg_ddl_common.sql:1193: NOTICE: defaulting compress_orderby to time_bucket -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | t | t -(1 row) - ---real time aggs is off now , should return 20 -- -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false'); -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only --------------------+---------------------+------------------- - test_setting_cagg | f | f -(1 row) - ---count should return additional data since we have real time aggs on -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 21 -(1 row) - -ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false'); -SELECT view_name, compression_enabled, materialized_only -FROM timescaledb_information.continuous_aggregates -where view_name = 'test_setting_cagg'; - view_name | compression_enabled | materialized_only 
--------------------+---------------------+------------------- - test_setting_cagg | f | t -(1 row) - ---real time aggs is off now , should return 20 -- -SELECT count(*) from test_setting_cagg ORDER BY 1; - count -------- - 20 -(1 row) - --- END TEST with multiple settings --- Test View Target Entries that contain both aggrefs and Vars in the same expression -CREATE TABLE transactions -( - "time" timestamp with time zone NOT NULL, - dummy1 integer, - dummy2 integer, - dummy3 integer, - dummy4 integer, - dummy5 integer, - amount integer, - fiat_value integer -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('transactions', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (46,public,transactions,t) -(1 row) - -\else -SELECT create_hypertable('transactions', 'time'); -\endif -INSERT INTO transactions VALUES ( '2018-01-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-01-02 09:30:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-01-02 09:20:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-01-02 09:10:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 10:40:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 11:50:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 12:10:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-11-01 13:10:00-08', 0, 0, 0, 0, 0, -1, 10); -INSERT INTO transactions VALUES ( '2018-11-02 09:20:00-08', 0, 0, 0, 0, 0, 1, 10); -INSERT INTO transactions VALUES ( '2018-11-02 10:30:00-08', 0, 0, 0, 0, 0, -1, 10); -CREATE materialized view cashflows( - bucket, - amount, - cashflow, - cashflow2 -) WITH ( - timescaledb.continuous, - timescaledb.materialized_only = true -) AS -SELECT time_bucket ('1 day', time) AS bucket, - amount, - CASE - WHEN amount < 0 THEN (0 - sum(fiat_value)) - ELSE sum(fiat_value) - END AS cashflow, - amount + sum(fiat_value) -FROM transactions -GROUP BY bucket, amount; -psql:include/cagg_ddl_common.sql:1267: NOTICE: refreshing continuous aggregate "cashflows" -SELECT h.table_name AS "MAT_TABLE_NAME", - partial_view_name AS "PART_VIEW_NAME", - direct_view_name AS "DIRECT_VIEW_NAME" -FROM _timescaledb_catalog.continuous_agg ca -INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id) -WHERE user_view_name = 'cashflows' -\gset --- Show both the columns and the view definitions to see that --- references are correct in the view as well. 
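-- A reduced sketch of the pattern exercised by the cashflows view above: a
-- continuous aggregate target entry may combine a GROUP BY column with an
-- aggregate in a single expression. The view name below is illustrative.
CREATE MATERIALIZED VIEW cashflow_sketch
WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS
SELECT time_bucket('1 day', time) AS bucket,
       amount,
       amount + sum(fiat_value) AS amount_plus_total
FROM transactions
GROUP BY bucket, amount;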
-\d+ "_timescaledb_internal".:"DIRECT_VIEW_NAME" - View "_timescaledb_internal._direct_view_47" - Column | Type | Collation | Nullable | Default | Storage | Description ------------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - amount | integer | | | | plain | - cashflow | bigint | | | | plain | - cashflow2 | bigint | | | | plain | -View definition: - SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, - transactions.amount, - CASE - WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) - ELSE sum(transactions.fiat_value) - END AS cashflow, - transactions.amount + sum(transactions.fiat_value) AS cashflow2 - FROM transactions - GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; - -\d+ "_timescaledb_internal".:"PART_VIEW_NAME" - View "_timescaledb_internal._partial_view_47" - Column | Type | Collation | Nullable | Default | Storage | Description ------------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - amount | integer | | | | plain | - cashflow | bigint | | | | plain | - cashflow2 | bigint | | | | plain | -View definition: - SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, - transactions.amount, - CASE - WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) - ELSE sum(transactions.fiat_value) - END AS cashflow, - transactions.amount + sum(transactions.fiat_value) AS cashflow2 - FROM transactions - GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; - -\d+ "_timescaledb_internal".:"MAT_TABLE_NAME" - Table "_timescaledb_internal._materialized_hypertable_47" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ------------+--------------------------+-----------+----------+---------+---------+--------------+------------- - bucket | timestamp with time zone | | not null | | plain | | - amount | integer | | | | plain | | - cashflow | bigint | | | | plain | | - cashflow2 | bigint | | | | plain | | -Indexes: - "_materialized_hypertable_47_amount_bucket_idx" btree (amount, bucket DESC) - "_materialized_hypertable_47_bucket_idx" btree (bucket DESC) -Triggers: - ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_47 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker() -Child tables: _timescaledb_internal._hyper_47_52_chunk, - _timescaledb_internal._hyper_47_53_chunk - -\d+ 'cashflows' - View "public.cashflows" - Column | Type | Collation | Nullable | Default | Storage | Description ------------+--------------------------+-----------+----------+---------+---------+------------- - bucket | timestamp with time zone | | | | plain | - amount | integer | | | | plain | - cashflow | bigint | | | | plain | - cashflow2 | bigint | | | | plain | -View definition: - SELECT _materialized_hypertable_47.bucket, - _materialized_hypertable_47.amount, - _materialized_hypertable_47.cashflow, - _materialized_hypertable_47.cashflow2 - FROM _timescaledb_internal._materialized_hypertable_47; - -SELECT * FROM cashflows; - bucket | amount | cashflow | cashflow2 -------------------------------+--------+----------+----------- - Sun Dec 31 16:00:00 2017 PST | 1 | 10 | 11 - Mon Jan 01 16:00:00 2018 PST | -1 | -30 | 29 - Wed Oct 31 17:00:00 2018 PDT | -1 | -20 | 19 - Wed Oct 31 17:00:00 2018 PDT | 1 | 30 | 31 - Thu Nov 01 17:00:00 
2018 PDT | -1 | -10 | 9 - Thu Nov 01 17:00:00 2018 PDT | 1 | 10 | 11 -(6 rows) - --- test cagg creation with named arguments in time_bucket --- note that positional arguments cannot follow named arguments --- 1. test named origin --- 2. test named timezone --- 3. test named ts --- 4. test named bucket width --- named origin -CREATE MATERIALIZED VIEW cagg_named_origin WITH -(timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket('1h', time, 'UTC', origin => '2001-01-03 01:23:45') AS bucket, -avg(amount) as avg_amount -FROM transactions GROUP BY 1 WITH NO DATA; --- named timezone -CREATE MATERIALIZED VIEW cagg_named_tz_origin WITH -(timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket('1h', time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, -avg(amount) as avg_amount -FROM transactions GROUP BY 1 WITH NO DATA; --- named ts -CREATE MATERIALIZED VIEW cagg_named_ts_tz_origin WITH -(timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket('1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, -avg(amount) as avg_amount -FROM transactions GROUP BY 1 WITH NO DATA; --- named bucket width -CREATE MATERIALIZED VIEW cagg_named_all WITH -(timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(bucket_width => '1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, -avg(amount) as avg_amount -FROM transactions GROUP BY 1 WITH NO DATA; --- Refreshing from the beginning (NULL) of a CAGG with variable time bucket and --- using an INTERVAL for the end timestamp (issue #5534) -CREATE MATERIALIZED VIEW transactions_montly -WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS -SELECT time_bucket(INTERVAL '1 month', time) AS bucket, - SUM(fiat_value), - MAX(fiat_value), - MIN(fiat_value) - FROM transactions -GROUP BY 1 -WITH NO DATA; --- No rows -SELECT * FROM transactions_montly ORDER BY bucket; - bucket | sum | max | min ---------+-----+-----+----- -(0 rows) - --- Refresh from beginning of the CAGG for 1 month -CALL refresh_continuous_aggregate('transactions_montly', NULL, INTERVAL '1 month'); -SELECT * FROM transactions_montly ORDER BY bucket; - bucket | sum | max | min -------------------------------+-----+-----+----- - Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 - Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 -(2 rows) - -TRUNCATE transactions_montly; --- Partial refresh the CAGG from beginning to an specific timestamp -CALL refresh_continuous_aggregate('transactions_montly', NULL, '2018-11-01 11:50:00-08'::timestamptz); -SELECT * FROM transactions_montly ORDER BY bucket; - bucket | sum | max | min -------------------------------+-----+-----+----- - Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 -(1 row) - --- Full refresh the CAGG -CALL refresh_continuous_aggregate('transactions_montly', NULL, NULL); -SELECT * FROM transactions_montly ORDER BY bucket; - bucket | sum | max | min -------------------------------+-----+-----+----- - Sun Dec 31 16:00:00 2017 PST | 40 | 10 | 10 - Wed Oct 31 17:00:00 2018 PDT | 70 | 10 | 10 -(2 rows) - --- Check set_chunk_time_interval on continuous aggregate -CREATE MATERIALIZED VIEW cagg_set_chunk_time_interval -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT time_bucket(INTERVAL '1 month', time) AS bucket, - SUM(fiat_value), - MAX(fiat_value), - MIN(fiat_value) -FROM transactions -GROUP BY 1 -WITH NO DATA; -SELECT 
set_chunk_time_interval('cagg_set_chunk_time_interval', chunk_time_interval => interval '1 month'); - set_chunk_time_interval -------------------------- - -(1 row) - -CALL refresh_continuous_aggregate('cagg_set_chunk_time_interval', NULL, NULL); -SELECT _timescaledb_functions.to_interval(d.interval_length) = interval '1 month' -FROM _timescaledb_catalog.dimension d - RIGHT JOIN _timescaledb_catalog.continuous_agg ca ON ca.user_view_name = 'cagg_set_chunk_time_interval' -WHERE d.hypertable_id = ca.mat_hypertable_id; - ?column? ----------- - t -(1 row) - --- Since #6077 CAggs are materialized only by default -DROP TABLE conditions CASCADE; -psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 3 other objects -psql:include/cagg_ddl_common.sql:1365: NOTICE: drop cascades to 2 other objects -CREATE TABLE conditions ( - time TIMESTAMPTZ NOT NULL, - location TEXT NOT NULL, - temperature DOUBLE PRECISION NULL -); -\if :IS_DISTRIBUTED -SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (54,public,conditions,t) -(1 row) - -\else -SELECT create_hypertable('conditions', 'time'); -\endif -INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); -INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); -INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); -INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); -INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); -INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55); -INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65); -INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75); -INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85); -INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10); -INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20); -CREATE MATERIALIZED VIEW conditions_daily -WITH (timescaledb.continuous) AS -SELECT location, - time_bucket(INTERVAL '1 day', time) AS bucket, - AVG(temperature) - FROM conditions -GROUP BY location, bucket -WITH NO DATA; -\d+ conditions_daily - View "public.conditions_daily" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+--------------------------+-----------+----------+---------+----------+------------- - location | text | | | | extended | - bucket | timestamp with time zone | | | | plain | - avg | double precision | | | | plain | -View definition: - SELECT _materialized_hypertable_55.location, - _materialized_hypertable_55.bucket, - _materialized_hypertable_55.avg - FROM _timescaledb_internal._materialized_hypertable_55; - --- Should return NO ROWS -SELECT * FROM conditions_daily ORDER BY bucket, avg; - location | bucket | avg -----------+--------+----- -(0 rows) - -ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=false); -\d+ conditions_daily - View "public.conditions_daily" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+--------------------------+-----------+----------+---------+----------+------------- - location | text | | | | extended | - bucket | timestamp with time zone | | | | plain | - avg | double precision | | | | plain | -View definition: - SELECT _materialized_hypertable_55.location, - _materialized_hypertable_55.bucket, - _materialized_hypertable_55.avg - FROM _timescaledb_internal._materialized_hypertable_55 - WHERE 
_materialized_hypertable_55.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) -UNION ALL - SELECT conditions.location, - time_bucket('@ 1 day'::interval, conditions."time") AS bucket, - avg(conditions.temperature) AS avg - FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(55)), '-infinity'::timestamp with time zone) - GROUP BY conditions.location, (time_bucket('@ 1 day'::interval, conditions."time")); - --- Should return ROWS because now it is realtime -SELECT * FROM conditions_daily ORDER BY bucket, avg; - location | bucket | avg -----------+------------------------------+----- - SFO | Sun Dec 31 16:00:00 2017 PST | 55 - NYC | Mon Jan 01 16:00:00 2018 PST | 65 - SFO | Mon Jan 01 16:00:00 2018 PST | 65 - por | Mon Jan 01 16:00:00 2018 PST | 100 - NYC | Wed Oct 31 17:00:00 2018 PDT | 65 - NYC | Thu Nov 01 17:00:00 2018 PDT | 15 -(6 rows) - --- Should return ROWS because we refreshed it -ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=true); -\d+ conditions_daily - View "public.conditions_daily" - Column | Type | Collation | Nullable | Default | Storage | Description -----------+--------------------------+-----------+----------+---------+----------+------------- - location | text | | | | extended | - bucket | timestamp with time zone | | | | plain | - avg | double precision | | | | plain | -View definition: - SELECT _materialized_hypertable_55.location, - _materialized_hypertable_55.bucket, - _materialized_hypertable_55.avg - FROM _timescaledb_internal._materialized_hypertable_55; - -CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); -SELECT * FROM conditions_daily ORDER BY bucket, avg; - location | bucket | avg -----------+------------------------------+----- - SFO | Sun Dec 31 16:00:00 2017 PST | 55 - SFO | Mon Jan 01 16:00:00 2018 PST | 65 - NYC | Mon Jan 01 16:00:00 2018 PST | 65 - por | Mon Jan 01 16:00:00 2018 PST | 100 - NYC | Wed Oct 31 17:00:00 2018 PDT | 65 - NYC | Thu Nov 01 17:00:00 2018 PDT | 15 -(6 rows) - --- cleanup -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); -DROP DATABASE :DATA_NODE_3 WITH (FORCE); diff --git a/tsl/test/expected/cagg_ddl_dist_ht-16.out b/tsl/test/expected/cagg_ddl_dist_ht.out similarity index 96% rename from tsl/test/expected/cagg_ddl_dist_ht-16.out rename to tsl/test/expected/cagg_ddl_dist_ht.out index b4d7ef8aca8..f8430fde30c 100644 --- a/tsl/test/expected/cagg_ddl_dist_ht-16.out +++ b/tsl/test/expected/cagg_ddl_dist_ht.out @@ -28,6 +28,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------------+-----------------------+--------------+------------------+------------------- db_cagg_ddl_dist_ht_1 | db_cagg_ddl_dist_ht_1 | t | t | t @@ -64,6 +67,7 @@ CREATE TABLE conditions ( ); \if :IS_DISTRIBUTED SELECT table_name FROM create_distributed_hypertable('conditions', 'timec', replication_factor => 2); +psql:include/cagg_ddl_common.sql:29: WARNING: distributed hypertable is deprecated table_name ------------ conditions @@ -87,6 +91,7 @@ SET ROLE 
:ROLE_DEFAULT_PERM_USER; CREATE TABLE foo(time TIMESTAMPTZ NOT NULL, data INTEGER); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('foo', 'time', replication_factor => 2); +psql:include/cagg_ddl_common.sql:53: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (2,public,foo,t) @@ -275,6 +280,7 @@ CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); \if :IS_DISTRIBUTED SELECT hypertable_id AS drop_chunks_table_id FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset +psql:include/cagg_ddl_common.sql:171: WARNING: distributed hypertable is deprecated \else SELECT hypertable_id AS drop_chunks_table_id FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset @@ -366,6 +372,7 @@ CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER); \if :IS_DISTRIBUTED SELECT hypertable_id AS drop_chunks_table_u_id FROM create_distributed_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7, replication_factor => 2) \gset +psql:include/cagg_ddl_common.sql:232: WARNING: distributed hypertable is deprecated \else SELECT hypertable_id AS drop_chunks_table_u_id FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset @@ -520,6 +527,7 @@ psql:include/cagg_ddl_common.sql:331: ERROR: hypertable is a continuous aggrega CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, v2 float); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('metrics', 'time', replication_factor => 2); +psql:include/cagg_ddl_common.sql:336: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (8,public,metrics,t) @@ -565,6 +573,7 @@ CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); \if :IS_DISTRIBUTED SELECT hypertable_id AS drop_chunks_table_nid FROM create_distributed_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10, replication_factor => 2) \gset +psql:include/cagg_ddl_common.sql:369: WARNING: distributed hypertable is deprecated \else SELECT hypertable_id AS drop_chunks_table_nid FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset @@ -983,6 +992,7 @@ CREATE TABLE whatever(time BIGINT NOT NULL, data INTEGER); SELECT hypertable_id AS whatever_nid FROM create_distributed_hypertable('whatever', 'time', chunk_time_interval => 10, replication_factor => 2) \gset +psql:include/cagg_ddl_common.sql:605: WARNING: distributed hypertable is deprecated \else SELECT hypertable_id AS whatever_nid FROM create_hypertable('whatever', 'time', chunk_time_interval => 10) @@ -1064,6 +1074,9 @@ FROM ( VALUES ('int2'), ('int4'), ('int8')) v (dt); +psql:include/cagg_ddl_common.sql:679: WARNING: distributed hypertable is deprecated +psql:include/cagg_ddl_common.sql:679: WARNING: distributed hypertable is deprecated +psql:include/cagg_ddl_common.sql:679: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (15,public,metrics_int2,t) @@ -1226,7 +1239,7 @@ tablespace | -- we test that the normal checks are done when changing the owner. 
\set ON_ERROR_STOP 0 ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; -psql:include/cagg_ddl_common.sql:831: ERROR: must be able to SET ROLE "test_role_1" +psql:include/cagg_ddl_common.sql:831: ERROR: must be member of role "test_role_1" \set ON_ERROR_STOP 1 -- Superuser can always change owner SET ROLE :ROLE_CLUSTER_SUPERUSER; @@ -1267,6 +1280,7 @@ $DIST$); CREATE TABLE conditionsnm(time_int INT NOT NULL, device INT, value FLOAT); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_ddl_common.sql:864: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (25,public,conditionsnm,t) @@ -1332,6 +1346,7 @@ CREATE TABLE test_schema.telemetry_raw ( ); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); +psql:include/cagg_ddl_common.sql:923: WARNING: distributed hypertable is deprecated create_distributed_hypertable ---------------------------------- (29,test_schema,telemetry_raw,t) @@ -1395,6 +1410,7 @@ CREATE TABLE test_schema.telemetry_raw ( ); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('test_schema.telemetry_raw', 'ts', replication_factor => 2); +psql:include/cagg_ddl_common.sql:962: WARNING: distributed hypertable is deprecated create_distributed_hypertable ---------------------------------- (31,test_schema,telemetry_raw,t) @@ -1500,6 +1516,7 @@ CREATE TABLE conditions ( ); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_ddl_common.sql:1025: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (34,public,conditions,t) @@ -1644,6 +1661,7 @@ SELECT * FROM test.show_indexespred(:'MAT_TABLE_NAME'); CREATE TABLE i3696(time timestamptz NOT NULL, search_query text, cnt integer, cnt2 integer); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('i3696', 'time', replication_factor => 2); +psql:include/cagg_ddl_common.sql:1105: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (36,public,i3696,t) @@ -1670,6 +1688,7 @@ ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 'true') CREATE TABLE test_setting(time timestamptz not null, val numeric); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('test_setting', 'time', replication_factor => 2); +psql:include/cagg_ddl_common.sql:1129: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (39,public,test_setting,t) @@ -1860,6 +1879,7 @@ CREATE TABLE transactions ); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('transactions', 'time', replication_factor => 2); +psql:include/cagg_ddl_common.sql:1235: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (46,public,transactions,t) @@ -1916,15 +1936,15 @@ WHERE user_view_name = 'cashflows' cashflow | bigint | | | | plain | cashflow2 | bigint | | | | plain | View definition: - SELECT time_bucket('@ 1 day'::interval, "time") AS bucket, - amount, + SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, + transactions.amount, CASE - WHEN amount < 0 THEN 0 - sum(fiat_value) - ELSE sum(fiat_value) + WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) + ELSE sum(transactions.fiat_value) END AS cashflow, - amount 
+ sum(fiat_value) AS cashflow2 + transactions.amount + sum(transactions.fiat_value) AS cashflow2 FROM transactions - GROUP BY (time_bucket('@ 1 day'::interval, "time")), amount; + GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; \d+ "_timescaledb_internal".:"PART_VIEW_NAME" View "_timescaledb_internal._partial_view_47" @@ -1935,15 +1955,15 @@ View definition: cashflow | bigint | | | | plain | cashflow2 | bigint | | | | plain | View definition: - SELECT time_bucket('@ 1 day'::interval, "time") AS bucket, - amount, + SELECT time_bucket('@ 1 day'::interval, transactions."time") AS bucket, + transactions.amount, CASE - WHEN amount < 0 THEN 0 - sum(fiat_value) - ELSE sum(fiat_value) + WHEN transactions.amount < 0 THEN 0 - sum(transactions.fiat_value) + ELSE sum(transactions.fiat_value) END AS cashflow, - amount + sum(fiat_value) AS cashflow2 + transactions.amount + sum(transactions.fiat_value) AS cashflow2 FROM transactions - GROUP BY (time_bucket('@ 1 day'::interval, "time")), amount; + GROUP BY (time_bucket('@ 1 day'::interval, transactions."time")), transactions.amount; \d+ "_timescaledb_internal".:"MAT_TABLE_NAME" Table "_timescaledb_internal._materialized_hypertable_47" @@ -1970,10 +1990,10 @@ Child tables: _timescaledb_internal._hyper_47_52_chunk, cashflow | bigint | | | | plain | cashflow2 | bigint | | | | plain | View definition: - SELECT bucket, - amount, - cashflow, - cashflow2 + SELECT _materialized_hypertable_47.bucket, + _materialized_hypertable_47.amount, + _materialized_hypertable_47.cashflow, + _materialized_hypertable_47.cashflow2 FROM _timescaledb_internal._materialized_hypertable_47; SELECT * FROM cashflows; @@ -2098,6 +2118,7 @@ CREATE TABLE conditions ( ); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_ddl_common.sql:1378: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (54,public,conditions,t) @@ -2133,9 +2154,9 @@ WITH NO DATA; bucket | timestamp with time zone | | | | plain | avg | double precision | | | | plain | View definition: - SELECT location, - bucket, - avg + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg FROM _timescaledb_internal._materialized_hypertable_55; -- Should return NO ROWS @@ -2188,9 +2209,9 @@ ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=true bucket | timestamp with time zone | | | | plain | avg | double precision | | | | plain | View definition: - SELECT location, - bucket, - avg + SELECT _materialized_hypertable_55.location, + _materialized_hypertable_55.bucket, + _materialized_hypertable_55.avg FROM _timescaledb_internal._materialized_hypertable_55; CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); diff --git a/tsl/test/expected/cagg_invalidation_dist_ht-13.out b/tsl/test/expected/cagg_invalidation_dist_ht-13.out index 2997dcf1d2a..f250cb23a80 100644 --- a/tsl/test/expected/cagg_invalidation_dist_ht-13.out +++ b/tsl/test/expected/cagg_invalidation_dist_ht-13.out @@ -28,6 +28,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created 
--------------------------------+--------------------------------+--------------+------------------+------------------- db_cagg_invalidation_dist_ht_1 | db_cagg_invalidation_dist_ht_1 | t | t | t @@ -56,6 +59,7 @@ SET timezone TO 'UTC'; CREATE TABLE conditions (time bigint NOT NULL, device int, temp float); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_invalidation_common.sql:14: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (1,public,conditions,t) @@ -67,6 +71,7 @@ SELECT create_hypertable('conditions', 'time', chunk_time_interval => 10); CREATE TABLE measurements (time int NOT NULL, device int, temp float); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('measurements', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_invalidation_common.sql:21: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (2,public,measurements,t) @@ -273,6 +278,16 @@ ORDER BY 1,2; -- There should be only "infinite" invalidations in the cagg -- invalidation log: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:192: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:192: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:192: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:192: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:192: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:192: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:192: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:192: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:192: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:192: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | 9223372036854775807 @@ -292,6 +307,19 @@ ORDER BY 1,2; -- Invalidations should be cleared inside the refresh window: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results 
from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | 9 @@ -303,6 +331,19 @@ SELECT * FROM cagg_invals; -- Refresh up to 50 from the beginning CALL refresh_continuous_aggregate('cond_10', 0, 50); SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -325,6 +366,19 @@ ORDER BY 1,2; -- Nothing changes with invalidations either since the region was -- already refreshed and no new invalidations have been generated: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -344,6 +398,22 @@ ORDER BY 1,2; (2 rows) SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -365,6 +435,25 @@ ORDER BY 1,2; (2 rows) SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -377,11 +466,31 @@ SELECT * FROM cagg_invals; -- There should be no hypertable invalidations initially: SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:229: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- (0 rows) SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting 
results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -402,6 +511,14 @@ INSERT INTO conditions VALUES (10, 5, 23.8), (19, 3, 23.6); INSERT INTO conditions VALUES (60, 3, 23.7), (70, 4, 23.7); -- Should see some invaliations in the hypertable invalidation log: SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:243: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:243: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:243: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:243: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:243: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:243: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:243: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:243: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 1 | 10 | 10 @@ -416,6 +533,18 @@ INSERT INTO measurements VALUES (20, 4, 23.7); INSERT INTO measurements VALUES (30, 5, 23.8), (80, 3, 23.6); -- Should now see invalidations for both hypertables SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from 
remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 1 | 10 | 10 @@ -444,6 +573,11 @@ ORDER BY 1,2; -- to the continuous aggregate log, but only for the hypertable that -- the refreshed aggregate belongs to: SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:263: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:263: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:263: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:263: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:263: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 2 | 20 | 20 @@ -451,6 +585,32 @@ SELECT * FROM hyper_invals; (2 rows) SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command 
execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -482,6 +642,33 @@ INSERT INTO conditions VALUES (60, 3, 23.6), (90, 3, 23.6); INSERT INTO conditions VALUES (20, 5, 23.8), (100, 3, 23.6); -- New invalidations in the hypertable invalidation log: SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 1 | 1 | 1 @@ -507,6 +694,32 @@ SELECT * FROM hyper_invals; -- But nothing has yet changed in the cagg invalidation log: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from 
remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -525,6 +738,11 @@ CALL refresh_continuous_aggregate('cond_10', 20, 60); -- Invalidations should be moved from the hypertable invalidation log -- to the continuous aggregate log. SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 2 | 20 | 20 @@ -533,6 +751,34 @@ SELECT * FROM hyper_invals; -- Only the cond_10 cagg should have its entries cut: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote 
command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -553,6 +799,26 @@ SELECT * FROM cagg_invals; CALL refresh_continuous_aggregate('cond_20', 20, 60); -- The cond_20 cagg should also have its entries cut: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results 
from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -569,6 +835,25 @@ SELECT * FROM cagg_invals; CALL refresh_continuous_aggregate('cond_10', 0, 20); -- The 1-19 invalidation should be deleted: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -587,6 +872,30 @@ INSERT INTO conditions VALUES (20, 1, 23.4), (25, 1, 23.4); INSERT INTO conditions VALUES (30, 1, 23.4), (46, 1, 23.4); CALL refresh_continuous_aggregate('cond_10', 1, 40); SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command 
execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -606,6 +915,32 @@ INSERT INTO conditions VALUES (15, 1, 23.4), (42, 1, 23.4); CALL refresh_continuous_aggregate('cond_10', 90, 100); psql:include/cagg_invalidation_common.sql:327: NOTICE: continuous aggregate "cond_10" is already up-to-date SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is 
deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -623,6 +958,27 @@ SELECT * FROM cagg_invals; -- Test max refresh window CALL refresh_continuous_aggregate('cond_10', NULL, NULL); SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | 110 | 9223372036854775807 @@ -636,6 +992,11 @@ SELECT * FROM cagg_invals; (8 rows) SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 2 | 20 | 20 @@ -678,6 +1039,11 @@ psql:include/cagg_invalidation_common.sql:352: ERROR: "_dist_hyper_1_1_chunk" i \endif -- Should see new invalidation entries for conditions for the non-distributed case SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 2 | 20 | 20 @@ -694,6 +1060,14 @@ SELECT * FROM conditions; -- Should see an infinite invalidation entry for conditions SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from 
remote command execution is deprecated
+psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated
  hyper_id |        start         |         end
 ----------+----------------------+---------------------
         1 | -9223372036854775808 | 9223372036854775807
@@ -859,6 +1233,13 @@ SELECT _timescaledb_functions.invalidation_cagg_log_add_entry(:cond_1_id, 1, 0);
 -- Test invalidations with bucket size 1
 INSERT INTO conditions VALUES (0, 1, 1.0);
 SELECT * FROM hyper_invals;
+psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated
  hyper_id | start | end
 ----------+-------+-----
         1 |     0 |   0
@@ -885,6 +1266,32 @@ ORDER BY 1,2;
 CALL refresh_continuous_aggregate('cond_1', 0, 1);
 SELECT * FROM cagg_invals WHERE cagg_id = :cond_1_id;
+psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated
  cagg_id |        start         |         end
 ---------+----------------------+---------------------
        6 | -9223372036854775808 |                  -2
@@ -980,6 +1387,75 @@ ORDER BY 1,2;
 -- Should leave one invalidation on each side of the refresh window
 SELECT * FROM cagg_invals WHERE cagg_id = :cond_1_id;
+psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated
  cagg_id | start |         end
 ---------+-------+---------------------
        6 |     0 |                   0
@@ -1029,6 +1505,7 @@ ORDER BY 1,2;
 CREATE table threshold_test (time int, value int);
 \if :IS_DISTRIBUTED
 SELECT create_distributed_hypertable('threshold_test', 'time', chunk_time_interval => 4, replication_factor => 2);
+psql:include/cagg_invalidation_common.sql:565: WARNING: distributed hypertable is deprecated
 psql:include/cagg_invalidation_common.sql:565: NOTICE: adding not-null constraint to column "time"
  create_distributed_hypertable
 -------------------------------
@@ -1122,6 +1599,85 @@ ORDER BY 1,2;
 -- threshold.
 SELECT * FROM cagg_invals WHERE cagg_id = :thresh_cagg_id;
+psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated
  cagg_id |        start         |         end
 ---------+----------------------+---------------------
        8 | -9223372036854775808 |                  -1
@@ -1229,6 +1785,85 @@ ORDER BY 1,2;
 -- The aggregate remains invalid beyond the invalidation threshold
 SELECT * FROM cagg_invals WHERE cagg_id = :thresh_cagg_id;
+psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated
  cagg_id |        start         |         end
 ---------+----------------------+---------------------
        8 | -9223372036854775808 |                  -1
@@ -1311,6 +1946,99 @@ CALL refresh_continuous_aggregate('cond_1', 10, NULL);
 psql:include/cagg_invalidation_common.sql:748: NOTICE: continuous aggregate "cond_1" is already up-to-date
 SELECT * FROM cagg_invals WHERE cagg_id = :cond_1_id;
+psql:include/cagg_invalidation_common.sql:750: WARNING: getting results from remote command execution is deprecated
  cagg_id | start |         end
 ---------+-------+---------------------
        6 |     1 |                   4
@@ -1340,6 +2068,88 @@ FROM _timescaledb_catalog.continuous_agg
 WHERE user_view_name = 'cond_10' \gset
 SELECT * FROM cagg_invals WHERE cagg_id = :cond_10_id;
+psql:include/cagg_invalidation_common.sql:776: WARNING: getting results from remote command execution is deprecated
  cagg_id |        start         |         end
 ---------+----------------------+---------------------
        3 | -9223372036854775808 |                 -21
diff --git a/tsl/test/expected/cagg_invalidation_dist_ht-14.out b/tsl/test/expected/cagg_invalidation_dist_ht-14.out
index 10cfcc6976f..d567c9b209f 100644
--- a/tsl/test/expected/cagg_invalidation_dist_ht-14.out
+++ b/tsl/test/expected/cagg_invalidation_dist_ht-14.out
@@ -28,6 +28,9 @@ FROM (
   SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).*
   FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name)
 ) a;
+WARNING: adding data node is deprecated
+WARNING: adding data node is deprecated
+WARNING: adding data node is deprecated
            node_name            |            database            | node_created | database_created | extension_created
 --------------------------------+--------------------------------+--------------+------------------+-------------------
  db_cagg_invalidation_dist_ht_1 | db_cagg_invalidation_dist_ht_1 | t            | t                | t
@@ -56,6 +59,7 @@ SET timezone TO 'UTC';
 CREATE TABLE conditions (time bigint NOT NULL, device int, temp float);
 \if :IS_DISTRIBUTED
 SELECT create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2);
+psql:include/cagg_invalidation_common.sql:14: WARNING: distributed hypertable is deprecated
  create_distributed_hypertable
 -------------------------------
  (1,public,conditions,t)
@@ -67,6 +71,7 @@ SELECT create_hypertable('conditions', 'time', chunk_time_interval => 10);
 CREATE TABLE measurements (time int NOT NULL, device int, temp float);
 \if :IS_DISTRIBUTED
 SELECT create_distributed_hypertable('measurements', 'time', chunk_time_interval => 10, replication_factor => 2);
+psql:include/cagg_invalidation_common.sql:21: WARNING: distributed hypertable is deprecated
  create_distributed_hypertable
 -------------------------------
  (2,public,measurements,t)
@@ -273,6 +278,16 @@ ORDER BY 1,2;
 -- There should be only "infinite" invalidations in the cagg
 -- invalidation log:
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:192: WARNING: getting results from remote command execution is deprecated
  cagg_id |        start         |         end
 ---------+----------------------+---------------------
        3 | -9223372036854775808 | 9223372036854775807
@@ -292,6 +307,19 @@ ORDER BY 1,2;
 -- Invalidations should be cleared inside the refresh window:
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated
  cagg_id |        start         |         end
 ---------+----------------------+---------------------
        3 | -9223372036854775808 |                   9
@@ -303,6 +331,19 @@ SELECT * FROM cagg_invals;
 -- Refresh up to 50 from the beginning
 CALL refresh_continuous_aggregate('cond_10', 0, 50);
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated
  cagg_id |        start         |         end
 ---------+----------------------+---------------------
        3 | -9223372036854775808 |                  -1
@@ -325,6 +366,19 @@ ORDER BY 1,2;
 -- Nothing changes with invalidations either since the region was
 -- already refreshed and no new invalidations have been generated:
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated
  cagg_id |        start         |         end
 ---------+----------------------+---------------------
        3 | -9223372036854775808 |                  -1
@@ -344,6 +398,22 @@ ORDER BY 1,2;
 (2 rows)
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated
  cagg_id |        start         |         end
 ---------+----------------------+---------------------
        3 | -9223372036854775808 |                  -1
@@ -365,6 +435,25 @@ ORDER BY 1,2;
 (2 rows)
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated
  cagg_id |        start         |         end
 ---------+----------------------+---------------------
        3 | -9223372036854775808 |                  -1
@@ -377,11 +466,31 @@ SELECT * FROM cagg_invals;
 -- There should be no hypertable invalidations initially:
 SELECT * FROM hyper_invals;
+psql:include/cagg_invalidation_common.sql:229: WARNING: getting results from remote command execution is deprecated
  hyper_id | start | end
 ----------+-------+-----
 (0 rows)
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated
  cagg_id |        start         |         end
 ---------+----------------------+---------------------
        3 | -9223372036854775808 |                  -1
@@ -402,6 +511,14 @@ INSERT INTO conditions VALUES (10, 5, 23.8), (19, 3, 23.6);
 INSERT INTO conditions VALUES (60, 3, 23.7), (70, 4, 23.7);
 -- Should see some invaliations in the hypertable invalidation log:
 SELECT * FROM hyper_invals;
+psql:include/cagg_invalidation_common.sql:243: WARNING: getting results from remote command execution is deprecated
+psql:include/cagg_invalidation_common.sql:243: WARNING: 
getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:243: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 1 | 10 | 10 @@ -416,6 +533,18 @@ INSERT INTO measurements VALUES (20, 4, 23.7); INSERT INTO measurements VALUES (30, 5, 23.8), (80, 3, 23.6); -- Should now see invalidations for both hypertables SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 1 | 10 | 10 @@ -444,6 +573,11 @@ ORDER BY 1,2; -- to the continuous aggregate log, but only for the hypertable that -- the refreshed aggregate belongs to: SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:263: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:263: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:263: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:263: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:263: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 2 | 20 | 20 @@ -451,6 +585,32 @@ SELECT * FROM hyper_invals; (2 rows) SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -482,6 +642,33 @@ INSERT INTO conditions VALUES (60, 3, 23.6), (90, 3, 23.6); INSERT INTO conditions VALUES (20, 5, 23.8), (100, 3, 23.6); -- New invalidations in the hypertable invalidation log: SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: 
getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 1 | 1 | 1 @@ -507,6 +694,32 @@ SELECT * FROM hyper_invals; -- But nothing has yet changed in the cagg invalidation log: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -525,6 +738,11 @@ CALL refresh_continuous_aggregate('cond_10', 20, 60); -- Invalidations should be moved from the hypertable invalidation log -- to the continuous aggregate log. 
SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 2 | 20 | 20 @@ -533,6 +751,34 @@ SELECT * FROM hyper_invals; -- Only the cond_10 cagg should have its entries cut: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: 
getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -553,6 +799,26 @@ SELECT * FROM cagg_invals; CALL refresh_continuous_aggregate('cond_20', 20, 60); -- The cond_20 cagg should also have its entries cut: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -569,6 +835,25 @@ SELECT * FROM cagg_invals; CALL refresh_continuous_aggregate('cond_10', 0, 20); -- The 1-19 invalidation should be deleted: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -587,6 +872,30 @@ INSERT INTO conditions VALUES (20, 1, 23.4), (25, 1, 23.4); INSERT INTO conditions VALUES (30, 1, 23.4), (46, 1, 23.4); CALL refresh_continuous_aggregate('cond_10', 1, 40); SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting 
results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -606,6 +915,32 @@ INSERT INTO conditions VALUES (15, 1, 23.4), (42, 1, 23.4); CALL refresh_continuous_aggregate('cond_10', 90, 100); psql:include/cagg_invalidation_common.sql:327: NOTICE: continuous aggregate "cond_10" is already up-to-date SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from 
remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -623,6 +958,27 @@ SELECT * FROM cagg_invals; -- Test max refresh window CALL refresh_continuous_aggregate('cond_10', NULL, NULL); SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote 
command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | 110 | 9223372036854775807 @@ -636,6 +992,11 @@ SELECT * FROM cagg_invals; (8 rows) SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 2 | 20 | 20 @@ -678,6 +1039,11 @@ psql:include/cagg_invalidation_common.sql:352: ERROR: cannot truncate foreign t \endif -- Should see new invalidation entries for conditions for the non-distributed case SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 2 | 20 | 20 @@ -694,6 +1060,14 @@ SELECT * FROM conditions; -- Should see an infinite invalidation entry for conditions SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated hyper_id | start | end 
----------+----------------------+--------------------- 1 | -9223372036854775808 | 9223372036854775807 @@ -859,6 +1233,13 @@ SELECT _timescaledb_functions.invalidation_cagg_log_add_entry(:cond_1_id, 1, 0); -- Test invalidations with bucket size 1 INSERT INTO conditions VALUES (0, 1, 1.0); SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 1 | 0 | 0 @@ -885,6 +1266,32 @@ ORDER BY 1,2; CALL refresh_continuous_aggregate('cond_1', 0, 1); SELECT * FROM cagg_invals WHERE cagg_id = :cond_1_id; +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is 
deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 6 | -9223372036854775808 | -2 @@ -980,6 +1387,75 @@ ORDER BY 1,2; -- Should leave one invalidation on each side of the refresh window SELECT * FROM cagg_invals WHERE cagg_id = :cond_1_id; +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+-------+--------------------- 6 | 0 | 0 @@ -1029,6 +1505,7 @@ ORDER BY 1,2; CREATE table threshold_test (time int, value int); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('threshold_test', 'time', chunk_time_interval => 4, replication_factor => 2); +psql:include/cagg_invalidation_common.sql:565: WARNING: distributed hypertable is deprecated psql:include/cagg_invalidation_common.sql:565: NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -1122,6 +1599,85 @@ ORDER BY 1,2; -- threshold. 
SELECT * FROM cagg_invals WHERE cagg_id = :thresh_cagg_id; +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution 
is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated 
 cagg_id | start | end
 ---------+----------------------+---------------------
 8 | -9223372036854775808 | -1
@@ -1229,6 +1785,85 @@ ORDER BY 1,2;
 -- The aggregate remains invalid beyond the invalidation threshold
 SELECT * FROM cagg_invals WHERE cagg_id = :thresh_cagg_id;
+psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end
 ---------+----------------------+---------------------
 8 | -9223372036854775808 | -1
@@ -1311,6 +1946,99 @@ CALL refresh_continuous_aggregate('cond_1', 10, NULL);
 psql:include/cagg_invalidation_common.sql:748: NOTICE: continuous aggregate "cond_1" is already up-to-date
 SELECT * FROM cagg_invals WHERE cagg_id = :cond_1_id;
+psql:include/cagg_invalidation_common.sql:750: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end
 ---------+-------+---------------------
 6 | 1 | 4
@@ -1340,6 +2068,88 @@ FROM _timescaledb_catalog.continuous_agg
 WHERE user_view_name = 'cond_10' \gset
 SELECT * FROM cagg_invals WHERE cagg_id = :cond_10_id;
+psql:include/cagg_invalidation_common.sql:776: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end
 ---------+----------------------+---------------------
 3 | -9223372036854775808 | -21
diff --git a/tsl/test/expected/cagg_invalidation_dist_ht-15.out b/tsl/test/expected/cagg_invalidation_dist_ht-15.out
index 10cfcc6976f..d567c9b209f 100644
--- a/tsl/test/expected/cagg_invalidation_dist_ht-15.out
+++ b/tsl/test/expected/cagg_invalidation_dist_ht-15.out
@@ -28,6 +28,9 @@ FROM (
 SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).*
 FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name)
 ) a;
+WARNING: adding data node is deprecated
+WARNING: adding data node is deprecated
+WARNING: adding data node is deprecated
 node_name | database | node_created | database_created | extension_created
 --------------------------------+--------------------------------+--------------+------------------+-------------------
 db_cagg_invalidation_dist_ht_1 | db_cagg_invalidation_dist_ht_1 | t | t | t
@@ -56,6 +59,7 @@ SET timezone TO 'UTC';
 CREATE TABLE conditions (time bigint NOT NULL, device int, temp float);
 \if :IS_DISTRIBUTED
 SELECT create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2);
+psql:include/cagg_invalidation_common.sql:14: WARNING: distributed hypertable is deprecated
 create_distributed_hypertable
 -------------------------------
 (1,public,conditions,t)
@@ -67,6 +71,7 @@ SELECT create_hypertable('conditions', 'time', chunk_time_interval => 10);
 CREATE TABLE measurements (time int NOT NULL, device int, temp float);
 \if :IS_DISTRIBUTED
 SELECT create_distributed_hypertable('measurements', 'time', chunk_time_interval => 10, replication_factor => 2);
+psql:include/cagg_invalidation_common.sql:21: WARNING: distributed hypertable is deprecated
 create_distributed_hypertable
 -------------------------------
 (2,public,measurements,t)
@@ -273,6 +278,16 @@ ORDER BY 1,2;
 -- There should be only "infinite" invalidations in the cagg
 -- invalidation log:
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:192: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end
 ---------+----------------------+---------------------
 3 | -9223372036854775808 | 9223372036854775807
@@ -292,6 +307,19 @@ ORDER BY 1,2;
 -- Invalidations should be cleared inside the refresh window:
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:200: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end
 ---------+----------------------+---------------------
 3 | -9223372036854775808 | 9
@@ -303,6 +331,19 @@ SELECT * FROM cagg_invals;
 -- Refresh up to 50 from the beginning
 CALL refresh_continuous_aggregate('cond_10', 0, 50);
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:204: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end
 ---------+----------------------+---------------------
 3 | -9223372036854775808 | -1
@@ -325,6 +366,19 @@ ORDER BY 1,2;
 -- Nothing changes with invalidations either since the region was
 -- already refreshed and no new invalidations have been generated:
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:213: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end
 ---------+----------------------+---------------------
 3 | -9223372036854775808 | -1
@@ -344,6 +398,22 @@ ORDER BY 1,2;
 (2 rows)
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:219: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end
 ---------+----------------------+---------------------
 3 | -9223372036854775808 | -1
@@ -365,6 +435,25 @@ ORDER BY 1,2;
 (2 rows)
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:226: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end
 ---------+----------------------+---------------------
 3 | -9223372036854775808 | -1
@@ -377,11 +466,31 @@ SELECT * FROM cagg_invals;
 -- There should be no hypertable invalidations initially:
 SELECT * FROM hyper_invals;
+psql:include/cagg_invalidation_common.sql:229: WARNING: getting results from remote command execution is deprecated
 hyper_id | start | end
 ----------+-------+-----
 (0 rows)
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:230: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end
 ---------+----------------------+---------------------
 3 | -9223372036854775808 | -1
@@ -402,6 +511,14 @@ INSERT INTO conditions VALUES (10, 5, 23.8), (19, 3, 23.6);
 INSERT INTO conditions VALUES (60, 3, 23.7), (70, 4, 23.7);
 -- Should see some invaliations in the hypertable invalidation log:
 SELECT * FROM hyper_invals;
+psql:include/cagg_invalidation_common.sql:243: WARNING: getting results from remote command execution is deprecated
 hyper_id | start | end
 ----------+-------+-----
 1 | 10 | 10
@@ -416,6 +533,18 @@ INSERT INTO measurements VALUES (20, 4, 23.7);
 INSERT INTO measurements VALUES (30, 5, 23.8), (80, 3, 23.6);
 -- Should now see invalidations for both hypertables
 SELECT * FROM hyper_invals;
+psql:include/cagg_invalidation_common.sql:250: WARNING: getting results from remote command execution is deprecated
 hyper_id | start | end
 ----------+-------+-----
 1 | 10 | 10
@@ -444,6 +573,11 @@ ORDER BY 1,2;
 -- to the continuous aggregate log, but only for the hypertable that
 -- the refreshed aggregate belongs to:
 SELECT * FROM hyper_invals;
+psql:include/cagg_invalidation_common.sql:263: WARNING: getting results from remote command execution is deprecated
 hyper_id | start | end
 ----------+-------+-----
 2 | 20 | 20
 (2 rows)
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:264: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end
 ---------+----------------------+---------------------
 3 | -9223372036854775808 | -1
@@ -482,6 +642,33 @@ INSERT INTO conditions VALUES (60, 3, 23.6), (90, 3, 23.6);
 INSERT INTO conditions VALUES (20, 5, 23.8), (100, 3, 23.6);
 -- New invalidations in the hypertable invalidation log:
 SELECT * FROM hyper_invals;
+psql:include/cagg_invalidation_common.sql:284: WARNING: getting results from remote command execution is deprecated
 hyper_id | start | end
 ----------+-------+-----
 1 | 1 | 1
@@ -507,6 +694,32 @@ SELECT * FROM hyper_invals;
 -- But nothing has yet changed in the cagg invalidation log:
 SELECT * FROM cagg_invals;
+psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated
+psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:287: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -525,6 +738,11 @@ CALL refresh_continuous_aggregate('cond_10', 20, 60); -- Invalidations should be moved from the hypertable invalidation log -- to the continuous aggregate log. 
SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:294: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 2 | 20 | 20 @@ -533,6 +751,34 @@ SELECT * FROM hyper_invals; -- Only the cond_10 cagg should have its entries cut: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: 
getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:297: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -553,6 +799,26 @@ SELECT * FROM cagg_invals; CALL refresh_continuous_aggregate('cond_20', 20, 60); -- The cond_20 cagg should also have its entries cut: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:303: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -569,6 +835,25 @@ SELECT * FROM cagg_invals; CALL refresh_continuous_aggregate('cond_10', 0, 20); -- The 1-19 invalidation should be deleted: SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:309: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -587,6 +872,30 @@ INSERT INTO conditions VALUES (20, 1, 23.4), (25, 1, 23.4); INSERT INTO conditions VALUES (30, 1, 23.4), (46, 1, 23.4); CALL refresh_continuous_aggregate('cond_10', 1, 40); SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting 
results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:321: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -606,6 +915,32 @@ INSERT INTO conditions VALUES (15, 1, 23.4), (42, 1, 23.4); CALL refresh_continuous_aggregate('cond_10', 90, 100); psql:include/cagg_invalidation_common.sql:327: NOTICE: continuous aggregate "cond_10" is already up-to-date SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from 
remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:329: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -1 @@ -623,6 +958,27 @@ SELECT * FROM cagg_invals; -- Test max refresh window CALL refresh_continuous_aggregate('cond_10', NULL, NULL); SELECT * FROM cagg_invals; +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote 
command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:334: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | 110 | 9223372036854775807 @@ -636,6 +992,11 @@ SELECT * FROM cagg_invals; (8 rows) SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:335: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 2 | 20 | 20 @@ -678,6 +1039,11 @@ psql:include/cagg_invalidation_common.sql:352: ERROR: cannot truncate foreign t \endif -- Should see new invalidation entries for conditions for the non-distributed case SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:358: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 2 | 20 | 20 @@ -694,6 +1060,14 @@ SELECT * FROM conditions; -- Should see an infinite invalidation entry for conditions SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:367: WARNING: getting results from remote command execution is deprecated hyper_id | start | end 
----------+----------------------+--------------------- 1 | -9223372036854775808 | 9223372036854775807 @@ -859,6 +1233,13 @@ SELECT _timescaledb_functions.invalidation_cagg_log_add_entry(:cond_1_id, 1, 0); -- Test invalidations with bucket size 1 INSERT INTO conditions VALUES (0, 1, 1.0); SELECT * FROM hyper_invals; +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:463: WARNING: getting results from remote command execution is deprecated hyper_id | start | end ----------+-------+----- 1 | 0 | 0 @@ -885,6 +1266,32 @@ ORDER BY 1,2; CALL refresh_continuous_aggregate('cond_1', 0, 1); SELECT * FROM cagg_invals WHERE cagg_id = :cond_1_id; +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is 
deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:477: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 6 | -9223372036854775808 | -2 @@ -980,6 +1387,75 @@ ORDER BY 1,2; -- Should leave one invalidation on each side of the refresh window SELECT * FROM cagg_invals WHERE cagg_id = :cond_1_id; +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:538: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+-------+--------------------- 6 | 0 | 0 @@ -1029,6 +1505,7 @@ ORDER BY 1,2; CREATE table threshold_test (time int, value int); \if :IS_DISTRIBUTED SELECT create_distributed_hypertable('threshold_test', 'time', chunk_time_interval => 4, replication_factor => 2); +psql:include/cagg_invalidation_common.sql:565: WARNING: distributed hypertable is deprecated psql:include/cagg_invalidation_common.sql:565: NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -1122,6 +1599,85 @@ ORDER BY 1,2; -- threshold. 
SELECT * FROM cagg_invals WHERE cagg_id = :thresh_cagg_id; +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution 
is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:639: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 8 | -9223372036854775808 | -1 @@ -1229,6 +1785,85 @@ ORDER BY 1,2; -- The aggregate remains invalid beyond the invalidation threshold SELECT * FROM cagg_invals WHERE cagg_id = :thresh_cagg_id; +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated 
+psql:include/cagg_invalidation_common.sql:695: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end ---------+----------------------+--------------------- 8 | -9223372036854775808 | -1 @@ -1311,6 +1946,99 @@ CALL refresh_continuous_aggregate('cond_1', 10, NULL); psql:include/cagg_invalidation_common.sql:748: NOTICE: continuous aggregate "cond_1" is already up-to-date SELECT * FROM cagg_invals WHERE cagg_id = :cond_1_id;
+psql:include/cagg_invalidation_common.sql:750: WARNING: getting results from remote command execution is deprecated
 cagg_id | start | end ---------+-------+--------------------- 6 | 1 | 4 @@ -1340,6 +2068,88 @@ FROM _timescaledb_catalog.continuous_agg WHERE user_view_name = 'cond_10' \gset SELECT * FROM cagg_invals WHERE cagg_id = :cond_10_id;
+psql:include/cagg_invalidation_common.sql:776: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:776: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:776: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:776: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:776: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:776: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:776: WARNING: getting results from remote command execution is deprecated +psql:include/cagg_invalidation_common.sql:776: WARNING: getting results from remote command execution is deprecated cagg_id | start | end ---------+----------------------+--------------------- 3 | -9223372036854775808 | -21 diff --git a/tsl/test/expected/cagg_migrate_dist_ht.out b/tsl/test/expected/cagg_migrate_dist_ht.out index ba4e0139765..db00d43dc9a 100644 --- a/tsl/test/expected/cagg_migrate_dist_ht.out +++ b/tsl/test/expected/cagg_migrate_dist_ht.out @@ -28,6 +28,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ---------------------------+---------------------------+--------------+------------------+------------------- db_cagg_migrate_dist_ht_1 | db_cagg_migrate_dist_ht_1 | t | t | t @@ -62,6 +65,7 @@ CREATE TABLE conditions ( SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); \else SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_migrate_common.sql:22: WARNING: distributed hypertable is deprecated table_name ------------ conditions @@ -768,6 +772,7 @@ CREATE TABLE conditions ( \if :IS_DISTRIBUTED \if :IS_TIME_DIMENSION SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_migrate_common.sql:20: WARNING: distributed hypertable is deprecated psql:include/cagg_migrate_common.sql:20: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices table_name ------------ @@ -1448,6 +1453,7 @@ CREATE TABLE conditions ( \if :IS_DISTRIBUTED \if :IS_TIME_DIMENSION SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_migrate_common.sql:20: WARNING: distributed hypertable is deprecated table_name ------------ conditions diff --git a/tsl/test/expected/cagg_on_cagg_dist_ht.out b/tsl/test/expected/cagg_on_cagg_dist_ht.out index 4d8d153b944..5b595dcebb5 100644 --- a/tsl/test/expected/cagg_on_cagg_dist_ht.out +++ b/tsl/test/expected/cagg_on_cagg_dist_ht.out @@ -26,6 +26,9 @@ AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' LANGUAGE C; SELECT (add_data_node (name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v (name); +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated 
+WARNING: adding data node is deprecated node_name | host | port | database | node_created | database_created | extension_created ---------------------------+-----------+-------+---------------------------+--------------+------------------+------------------- db_cagg_on_cagg_dist_ht_1 | localhost | 55432 | db_cagg_on_cagg_dist_ht_1 | t | t | t @@ -96,6 +99,7 @@ psql:include/cagg_on_cagg_setup.sql:14: NOTICE: table "conditions" does not exi SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); \else SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_on_cagg_setup.sql:39: WARNING: distributed hypertable is deprecated table_name ------------ conditions @@ -513,6 +517,7 @@ DROP TABLE IF EXISTS conditions CASCADE; SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); \else SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_on_cagg_setup.sql:39: WARNING: distributed hypertable is deprecated table_name ------------ conditions @@ -1261,6 +1266,7 @@ psql:include/cagg_on_cagg_setup.sql:14: NOTICE: table "conditions" does not exi \if :IS_DISTRIBUTED \if :IS_TIME_DIMENSION SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_on_cagg_setup.sql:37: WARNING: distributed hypertable is deprecated psql:include/cagg_on_cagg_setup.sql:37: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices table_name ------------ @@ -1674,6 +1680,7 @@ DROP TABLE IF EXISTS conditions CASCADE; \if :IS_DISTRIBUTED \if :IS_TIME_DIMENSION SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_on_cagg_setup.sql:37: WARNING: distributed hypertable is deprecated psql:include/cagg_on_cagg_setup.sql:37: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices table_name ------------ @@ -2518,6 +2525,7 @@ DROP TABLE IF EXISTS conditions CASCADE; \if :IS_DISTRIBUTED \if :IS_TIME_DIMENSION SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_on_cagg_setup.sql:37: WARNING: distributed hypertable is deprecated table_name ------------ conditions @@ -2930,6 +2938,7 @@ DROP TABLE IF EXISTS conditions CASCADE; \if :IS_DISTRIBUTED \if :IS_TIME_DIMENSION SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_on_cagg_setup.sql:37: WARNING: distributed hypertable is deprecated table_name ------------ conditions diff --git a/tsl/test/expected/cagg_on_cagg_joins_dist_ht.out b/tsl/test/expected/cagg_on_cagg_joins_dist_ht.out index 537e237237e..55678a675cb 100644 --- a/tsl/test/expected/cagg_on_cagg_joins_dist_ht.out +++ b/tsl/test/expected/cagg_on_cagg_joins_dist_ht.out @@ -26,6 +26,9 @@ AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' LANGUAGE C; SELECT (add_data_node (name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v (name); +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | host | port | database | node_created | database_created | extension_created 
---------------------------------+-----------+-------+---------------------------------+--------------+------------------+------------------- db_cagg_on_cagg_joins_dist_ht_1 | localhost | 55432 | db_cagg_on_cagg_joins_dist_ht_1 | t | t | t @@ -96,6 +99,7 @@ psql:include/cagg_on_cagg_setup.sql:30: NOTICE: table "devices" does not exist, SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); \else SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_on_cagg_setup.sql:39: WARNING: distributed hypertable is deprecated table_name ------------ conditions @@ -523,6 +527,7 @@ DROP TABLE IF EXISTS conditions CASCADE; SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); \else SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); +psql:include/cagg_on_cagg_setup.sql:39: WARNING: distributed hypertable is deprecated table_name ------------ conditions @@ -1281,6 +1286,7 @@ psql:include/cagg_on_cagg_setup.sql:14: NOTICE: table "conditions" does not exi \if :IS_DISTRIBUTED \if :IS_TIME_DIMENSION SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_on_cagg_setup.sql:37: WARNING: distributed hypertable is deprecated psql:include/cagg_on_cagg_setup.sql:37: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices table_name ------------ @@ -1704,6 +1710,7 @@ DROP TABLE IF EXISTS conditions CASCADE; \if :IS_DISTRIBUTED \if :IS_TIME_DIMENSION SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_on_cagg_setup.sql:37: WARNING: distributed hypertable is deprecated psql:include/cagg_on_cagg_setup.sql:37: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices table_name ------------ @@ -2558,6 +2565,7 @@ DROP TABLE IF EXISTS conditions CASCADE; \if :IS_DISTRIBUTED \if :IS_TIME_DIMENSION SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_on_cagg_setup.sql:37: WARNING: distributed hypertable is deprecated table_name ------------ conditions @@ -2980,6 +2988,7 @@ DROP TABLE IF EXISTS conditions CASCADE; \if :IS_DISTRIBUTED \if :IS_TIME_DIMENSION SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_on_cagg_setup.sql:37: WARNING: distributed hypertable is deprecated table_name ------------ conditions diff --git a/tsl/test/expected/chunk_api.out b/tsl/test/expected/chunk_api.out index 5e07f28e5cb..1051f479e22 100644 --- a/tsl/test/expected/chunk_api.out +++ b/tsl/test/expected/chunk_api.out @@ -2,23 +2,6 @@ -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. \c :TEST_DBNAME :ROLE_SUPERUSER -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -\ir include/remote_exec.sql --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. 
-CREATE SCHEMA IF NOT EXISTS test; -psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping -GRANT USAGE ON SCHEMA test TO PUBLIC; -CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) -RETURNS VOID -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' -LANGUAGE C; -CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) -RETURNS TABLE("table_record" CSTRING[]) -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' -LANGUAGE C; GRANT CREATE ON DATABASE :"TEST_DBNAME" TO :ROLE_DEFAULT_PERM_USER; SET ROLE :ROLE_DEFAULT_PERM_USER; CREATE SCHEMA "ChunkSchema"; @@ -270,301 +253,6 @@ ORDER BY tablename, attname; _hyper_1_1_chunk | time | f | 0 | 8 | -1 (4 rows) --- Test getting chunk stats on a distribute hypertable -SET ROLE :ROLE_CLUSTER_SUPERUSER; -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2')) v(name) -) a; - node_name | database | node_created | database_created | extension_created -----------------+----------------+--------------+------------------+------------------- - db_chunk_api_1 | db_chunk_api_1 | t | t | t - db_chunk_api_2 | db_chunk_api_2 | t | t | t -(2 rows) - -GRANT USAGE - ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2 - TO :ROLE_1, :ROLE_DEFAULT_PERM_USER; --- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes -GRANT CREATE ON SCHEMA public TO :ROLE_1; -SET ROLE :ROLE_1; -CREATE TABLE disttable (time timestamptz, device int, temp float, color text); -SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device'); -NOTICE: adding not-null constraint to column "time" - hypertable_id | schema_name | table_name | created ----------------+-------------+------------+--------- - 2 | public | disttable | t -(1 row) - -INSERT INTO disttable VALUES ('2018-01-01 05:00:00-8', 1, 23.4, 'green'), - ('2018-01-01 06:00:00-8', 4, 22.3, NULL), - ('2018-01-01 06:00:00-8', 1, 21.1, 'green'); --- Make sure we get deterministic behavior across all nodes -CALL distributed_exec($$ SELECT setseed(1); $$); --- No stats on the local table -SELECT * FROM _timescaledb_functions.get_chunk_relstats('disttable'); - chunk_id | hypertable_id | num_pages | num_tuples | num_allvisible -----------+---------------+-----------+------------+---------------- - 3 | 2 | 0 | 0 | 0 - 4 | 2 | 0 | 0 | 0 -(2 rows) - -SELECT * FROM _timescaledb_functions.get_chunk_colstats('disttable'); - chunk_id | hypertable_id | att_num | nullfrac | width | distinctval | slotkind | slotopstrings | slotcollations | slot1numbers | slot2numbers | slot3numbers | slot4numbers | slot5numbers | slotvaluetypetrings | slot1values | slot2values | slot3values | slot4values | slot5values -----------+---------------+---------+----------+-------+-------------+----------+---------------+----------------+--------------+--------------+--------------+--------------+--------------+---------------------+-------------+-------------+-------------+-------------+------------- -(0 rows) - -SELECT relname, reltuples, relpages, relallvisible FROM pg_class WHERE relname IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY relname; - relname | reltuples | relpages | relallvisible ------------------------+-----------+----------+--------------- - _dist_hyper_2_3_chunk | 0 | 0 | 0 - _dist_hyper_2_4_chunk | 0 | 0 
| 0 -(2 rows) - -SELECT * FROM pg_stats WHERE tablename IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY 1,2,3; - schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram -------------+-----------+---------+-----------+-----------+-----------+------------+------------------+-------------------+------------------+-------------+-------------------+------------------------+---------------------- -(0 rows) - --- Run ANALYZE on data node 1 -CALL distributed_exec('ANALYZE disttable', ARRAY[:'DATA_NODE_1']); --- Stats should now be refreshed after running get_chunk_{col,rel}stats -SELECT relname, reltuples, relpages, relallvisible FROM pg_class WHERE relname IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY relname; - relname | reltuples | relpages | relallvisible ------------------------+-----------+----------+--------------- - _dist_hyper_2_3_chunk | 0 | 0 | 0 - _dist_hyper_2_4_chunk | 0 | 0 | 0 -(2 rows) - -SELECT * FROM pg_stats WHERE tablename IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY 1,2,3; - schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram -------------+-----------+---------+-----------+-----------+-----------+------------+------------------+-------------------+------------------+-------------+-------------------+------------------------+---------------------- -(0 rows) - -SELECT * FROM _timescaledb_functions.get_chunk_relstats('disttable'); - chunk_id | hypertable_id | num_pages | num_tuples | num_allvisible -----------+---------------+-----------+------------+---------------- - 3 | 2 | 1 | 2 | 0 - 4 | 2 | 0 | 0 | 0 -(2 rows) - -SELECT * FROM _timescaledb_functions.get_chunk_colstats('disttable'); - chunk_id | hypertable_id | att_num | nullfrac | width | distinctval | slotkind | slotopstrings | slotcollations | slot1numbers | slot2numbers | slot3numbers | slot4numbers | slot5numbers | slotvaluetypetrings | slot1values | slot2values | slot3values | slot4values | slot5values -----------+---------------+---------+----------+-------+-------------+-------------+-------------------------------------------------------------------------------------------------------------------------+-----------------+--------------+--------------+--------------+--------------+--------------+--------------------------+-----------------------------------------------------------------+-------------+-------------+-------------+------------- - 3 | 2 | 1 | 0 | 8 | -1 | {2,3,0,0,0} | {<,pg_catalog,timestamptz,pg_catalog,timestamptz,pg_catalog,<,pg_catalog,timestamptz,pg_catalog,timestamptz,pg_catalog} | {0,0,0,0,0} | | {1} | | | | {timestamptz,pg_catalog} | {"Mon Jan 01 05:00:00 2018 PST","Mon Jan 01 06:00:00 2018 PST"} | | | | - 3 | 2 | 2 | 0 | 4 | -0.5 | {1,3,0,0,0} | {=,pg_catalog,int4,pg_catalog,int4,pg_catalog,<,pg_catalog,int4,pg_catalog,int4,pg_catalog} | {0,0,0,0,0} | {1} | {1} | | | | {int4,pg_catalog} | {1} | | | | - 3 | 2 | 3 | 0 | 8 | -1 | {2,3,0,0,0} | {<,pg_catalog,float8,pg_catalog,float8,pg_catalog,<,pg_catalog,float8,pg_catalog,float8,pg_catalog} | {0,0,0,0,0} | | {-1} | 
| | | {float8,pg_catalog} | {21.1,23.4} | | | | - 3 | 2 | 4 | 0 | 6 | -0.5 | {1,3,0,0,0} | {=,pg_catalog,text,pg_catalog,text,pg_catalog,<,pg_catalog,text,pg_catalog,text,pg_catalog} | {100,100,0,0,0} | {1} | {1} | | | | {text,pg_catalog} | {green} | | | | -(4 rows) - -SELECT relname, reltuples, relpages, relallvisible FROM pg_class WHERE relname IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY relname; - relname | reltuples | relpages | relallvisible ------------------------+-----------+----------+--------------- - _dist_hyper_2_3_chunk | 2 | 1 | 0 - _dist_hyper_2_4_chunk | 0 | 0 | 0 -(2 rows) - -SELECT * FROM pg_stats WHERE tablename IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY 1,2,3; - schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram ------------------------+-----------------------+---------+-----------+-----------+-----------+------------+------------------+-------------------+-----------------------------------------------------------------+-------------+-------------------+------------------------+---------------------- - _timescaledb_internal | _dist_hyper_2_3_chunk | color | f | 0 | 6 | -0.5 | {green} | {1} | | 1 | | | - _timescaledb_internal | _dist_hyper_2_3_chunk | device | f | 0 | 4 | -0.5 | {1} | {1} | | 1 | | | - _timescaledb_internal | _dist_hyper_2_3_chunk | temp | f | 0 | 8 | -1 | | | {21.1,23.4} | -1 | | | - _timescaledb_internal | _dist_hyper_2_3_chunk | time | f | 0 | 8 | -1 | | | {"Mon Jan 01 05:00:00 2018 PST","Mon Jan 01 06:00:00 2018 PST"} | 1 | | | -(4 rows) - --- Test that user without table permissions can't get column stats -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT * FROM _timescaledb_functions.get_chunk_colstats('disttable'); - chunk_id | hypertable_id | att_num | nullfrac | width | distinctval | slotkind | slotopstrings | slotcollations | slot1numbers | slot2numbers | slot3numbers | slot4numbers | slot5numbers | slotvaluetypetrings | slot1values | slot2values | slot3values | slot4values | slot5values -----------+---------------+---------+----------+-------+-------------+----------+---------------+----------------+--------------+--------------+--------------+--------------+--------------+---------------------+-------------+-------------+-------------+-------------+------------- -(0 rows) - -SET ROLE :ROLE_1; --- Run ANALYZE again, but on both nodes. 
-ANALYZE disttable; --- Now expect stats from all data node chunks -SELECT * FROM _timescaledb_functions.get_chunk_relstats('disttable'); - chunk_id | hypertable_id | num_pages | num_tuples | num_allvisible -----------+---------------+-----------+------------+---------------- - 3 | 2 | 1 | 2 | 0 - 4 | 2 | 1 | 1 | 0 -(2 rows) - -SELECT * FROM _timescaledb_functions.get_chunk_colstats('disttable'); - chunk_id | hypertable_id | att_num | nullfrac | width | distinctval | slotkind | slotopstrings | slotcollations | slot1numbers | slot2numbers | slot3numbers | slot4numbers | slot5numbers | slotvaluetypetrings | slot1values | slot2values | slot3values | slot4values | slot5values -----------+---------------+---------+----------+-------+-------------+-------------+-------------------------------------------------------------------------------------------------------------------------+-----------------+--------------+--------------+--------------+--------------+--------------+--------------------------+-----------------------------------------------------------------+-------------+-------------+-------------+------------- - 3 | 2 | 1 | 0 | 8 | -1 | {2,3,0,0,0} | {<,pg_catalog,timestamptz,pg_catalog,timestamptz,pg_catalog,<,pg_catalog,timestamptz,pg_catalog,timestamptz,pg_catalog} | {0,0,0,0,0} | | {1} | | | | {timestamptz,pg_catalog} | {"Mon Jan 01 05:00:00 2018 PST","Mon Jan 01 06:00:00 2018 PST"} | | | | - 3 | 2 | 2 | 0 | 4 | -0.5 | {1,3,0,0,0} | {=,pg_catalog,int4,pg_catalog,int4,pg_catalog,<,pg_catalog,int4,pg_catalog,int4,pg_catalog} | {0,0,0,0,0} | {1} | {1} | | | | {int4,pg_catalog} | {1} | | | | - 3 | 2 | 3 | 0 | 8 | -1 | {2,3,0,0,0} | {<,pg_catalog,float8,pg_catalog,float8,pg_catalog,<,pg_catalog,float8,pg_catalog,float8,pg_catalog} | {0,0,0,0,0} | | {-1} | | | | {float8,pg_catalog} | {21.1,23.4} | | | | - 3 | 2 | 4 | 0 | 6 | -0.5 | {1,3,0,0,0} | {=,pg_catalog,text,pg_catalog,text,pg_catalog,<,pg_catalog,text,pg_catalog,text,pg_catalog} | {100,100,0,0,0} | {1} | {1} | | | | {text,pg_catalog} | {green} | | | | - 4 | 2 | 1 | 0 | 8 | -1 | {0,0,0,0,0} | {} | {0,0,0,0,0} | | | | | | {} | | | | | - 4 | 2 | 2 | 0 | 4 | -1 | {0,0,0,0,0} | {} | {0,0,0,0,0} | | | | | | {} | | | | | - 4 | 2 | 3 | 0 | 8 | -1 | {0,0,0,0,0} | {} | {0,0,0,0,0} | | | | | | {} | | | | | - 4 | 2 | 4 | 1 | 0 | 0 | {0,0,0,0,0} | {} | {0,0,0,0,0} | | | | | | {} | | | | | -(8 rows) - --- Test ANALYZE with a replica chunk. We'd like to ensure the --- stats-fetching functions handle duplicate stats from different (but --- identical) replica chunks. -SELECT set_replication_factor('disttable', 2); -WARNING: hypertable "disttable" is under-replicated - set_replication_factor ------------------------- - -(1 row) - -INSERT INTO disttable VALUES ('2019-01-01 05:00:00-8', 1, 23.4, 'green'); --- Run twice to test that stats-fetching functions handle replica chunks. 
-ANALYZE disttable; -ANALYZE disttable; -SELECT relname, reltuples, relpages, relallvisible FROM pg_class WHERE relname IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY relname; - relname | reltuples | relpages | relallvisible ------------------------+-----------+----------+--------------- - _dist_hyper_2_3_chunk | 2 | 1 | 0 - _dist_hyper_2_4_chunk | 1 | 1 | 0 - _dist_hyper_2_5_chunk | 1 | 1 | 0 -(3 rows) - -SELECT * FROM pg_stats WHERE tablename IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY 1,2,3; - schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram ------------------------+-----------------------+---------+-----------+-----------+-----------+------------+------------------+-------------------+-----------------------------------------------------------------+-------------+-------------------+------------------------+---------------------- - _timescaledb_internal | _dist_hyper_2_3_chunk | color | f | 0 | 6 | -0.5 | {green} | {1} | | 1 | | | - _timescaledb_internal | _dist_hyper_2_3_chunk | device | f | 0 | 4 | -0.5 | {1} | {1} | | 1 | | | - _timescaledb_internal | _dist_hyper_2_3_chunk | temp | f | 0 | 8 | -1 | | | {21.1,23.4} | -1 | | | - _timescaledb_internal | _dist_hyper_2_3_chunk | time | f | 0 | 8 | -1 | | | {"Mon Jan 01 05:00:00 2018 PST","Mon Jan 01 06:00:00 2018 PST"} | 1 | | | - _timescaledb_internal | _dist_hyper_2_4_chunk | color | f | 1 | 0 | 0 | | | | | | | - _timescaledb_internal | _dist_hyper_2_4_chunk | device | f | 0 | 4 | -1 | | | | | | | - _timescaledb_internal | _dist_hyper_2_4_chunk | temp | f | 0 | 8 | -1 | | | | | | | - _timescaledb_internal | _dist_hyper_2_4_chunk | time | f | 0 | 8 | -1 | | | | | | | - _timescaledb_internal | _dist_hyper_2_5_chunk | color | f | 0 | 6 | -1 | | | | | | | - _timescaledb_internal | _dist_hyper_2_5_chunk | device | f | 0 | 4 | -1 | | | | | | | - _timescaledb_internal | _dist_hyper_2_5_chunk | temp | f | 0 | 8 | -1 | | | | | | | - _timescaledb_internal | _dist_hyper_2_5_chunk | time | f | 0 | 8 | -1 | | | | | | | -(12 rows) - --- Check underlying pg_statistics table (looking at all columns except --- starelid, which changes depending on how many tests are run before --- this) -RESET ROLE; -SELECT ch, staattnum, stainherit, stanullfrac, stawidth, stadistinct, stakind1, stakind2, stakind3, stakind4, stakind5, staop1, staop2, staop3, staop4, staop5, -stanumbers1, stanumbers2, stanumbers3, stanumbers4, stanumbers5, stavalues1, stavalues2, stavalues3, stavalues4, stavalues5 -FROM pg_statistic st, show_chunks('disttable') ch -WHERE st.starelid = ch -ORDER BY ch, staattnum; - ch | staattnum | stainherit | stanullfrac | stawidth | stadistinct | stakind1 | stakind2 | stakind3 | stakind4 | stakind5 | staop1 | staop2 | staop3 | staop4 | staop5 | stanumbers1 | stanumbers2 | stanumbers3 | stanumbers4 | stanumbers5 | stavalues1 | stavalues2 | stavalues3 | stavalues4 | stavalues5 
----------------------------------------------+-----------+------------+-------------+----------+-------------+----------+----------+----------+----------+----------+--------+--------+--------+--------+--------+-------------+-------------+-------------+-------------+-------------+-----------------------------------------------------------------+------------+------------+------------+------------ - _timescaledb_internal._dist_hyper_2_3_chunk | 1 | f | 0 | 8 | -1 | 2 | 3 | 0 | 0 | 0 | 1322 | 1322 | 0 | 0 | 0 | | {1} | | | | {"Mon Jan 01 05:00:00 2018 PST","Mon Jan 01 06:00:00 2018 PST"} | | | | - _timescaledb_internal._dist_hyper_2_3_chunk | 2 | f | 0 | 4 | -0.5 | 1 | 3 | 0 | 0 | 0 | 96 | 97 | 0 | 0 | 0 | {1} | {1} | | | | {1} | | | | - _timescaledb_internal._dist_hyper_2_3_chunk | 3 | f | 0 | 8 | -1 | 2 | 3 | 0 | 0 | 0 | 672 | 672 | 0 | 0 | 0 | | {-1} | | | | {21.1,23.4} | | | | - _timescaledb_internal._dist_hyper_2_3_chunk | 4 | f | 0 | 6 | -0.5 | 1 | 3 | 0 | 0 | 0 | 98 | 664 | 0 | 0 | 0 | {1} | {1} | | | | {green} | | | | - _timescaledb_internal._dist_hyper_2_4_chunk | 1 | f | 0 | 8 | -1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | | | | | | | | | | - _timescaledb_internal._dist_hyper_2_4_chunk | 2 | f | 0 | 4 | -1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | | | | | | | | | | - _timescaledb_internal._dist_hyper_2_4_chunk | 3 | f | 0 | 8 | -1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | | | | | | | | | | - _timescaledb_internal._dist_hyper_2_4_chunk | 4 | f | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | | | | | | | | | | - _timescaledb_internal._dist_hyper_2_5_chunk | 1 | f | 0 | 8 | -1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | | | | | | | | | | - _timescaledb_internal._dist_hyper_2_5_chunk | 2 | f | 0 | 4 | -1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | | | | | | | | | | - _timescaledb_internal._dist_hyper_2_5_chunk | 3 | f | 0 | 8 | -1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | | | | | | | | | | - _timescaledb_internal._dist_hyper_2_5_chunk | 4 | f | 0 | 6 | -1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | | | | | | | | | | -(12 rows) - -SELECT test.remote_exec(NULL, $$ -SELECT ch, staattnum, stainherit, stanullfrac, stawidth, stadistinct, stakind1, stakind2, stakind3, stakind4, stakind5, staop1, staop2, staop3, staop4, staop5, -stanumbers1, stanumbers2, stanumbers3, stanumbers4, stanumbers5, stavalues1, stavalues2, stavalues3, stavalues4, stavalues5 -FROM pg_statistic st, show_chunks('disttable') ch -WHERE st.starelid = ch -ORDER BY ch, staattnum; -$$); -NOTICE: [db_chunk_api_1]: -SELECT ch, staattnum, stainherit, stanullfrac, stawidth, stadistinct, stakind1, stakind2, stakind3, stakind4, stakind5, staop1, staop2, staop3, staop4, staop5, -stanumbers1, stanumbers2, stanumbers3, stanumbers4, stanumbers5, stavalues1, stavalues2, stavalues3, stavalues4, stavalues5 -FROM pg_statistic st, show_chunks('disttable') ch -WHERE st.starelid = ch -ORDER BY ch, staattnum -NOTICE: [db_chunk_api_1]: -ch |staattnum|stainherit|stanullfrac|stawidth|stadistinct|stakind1|stakind2|stakind3|stakind4|stakind5|staop1|staop2|staop3|staop4|staop5|stanumbers1|stanumbers2|stanumbers3|stanumbers4|stanumbers5|stavalues1 |stavalues2|stavalues3|stavalues4|stavalues5 --------------------------------------------+---------+----------+-----------+--------+-----------+--------+--------+--------+--------+--------+------+------+------+------+------+-----------+-----------+-----------+-----------+-----------+---------------------------------------------------------------+----------+----------+----------+---------- 
-_timescaledb_internal._dist_hyper_2_3_chunk| 1|f | 0| 8| -1| 2| 3| 0| 0| 0| 1322| 1322| 0| 0| 0| |{1} | | | |{"Mon Jan 01 05:00:00 2018 PST","Mon Jan 01 06:00:00 2018 PST"}| | | | -_timescaledb_internal._dist_hyper_2_3_chunk| 2|f | 0| 4| -0.5| 1| 3| 0| 0| 0| 96| 97| 0| 0| 0|{1} |{1} | | | |{1} | | | | -_timescaledb_internal._dist_hyper_2_3_chunk| 3|f | 0| 8| -1| 2| 3| 0| 0| 0| 672| 672| 0| 0| 0| |{-1} | | | |{21.1,23.4} | | | | -_timescaledb_internal._dist_hyper_2_3_chunk| 4|f | 0| 6| -0.5| 1| 3| 0| 0| 0| 98| 664| 0| 0| 0|{1} |{1} | | | |{green} | | | | -_timescaledb_internal._dist_hyper_2_5_chunk| 1|f | 0| 8| -1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| | | | | | | | | | -_timescaledb_internal._dist_hyper_2_5_chunk| 2|f | 0| 4| -1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| | | | | | | | | | -_timescaledb_internal._dist_hyper_2_5_chunk| 3|f | 0| 8| -1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| | | | | | | | | | -_timescaledb_internal._dist_hyper_2_5_chunk| 4|f | 0| 6| -1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| | | | | | | | | | -(8 rows) - - -NOTICE: [db_chunk_api_2]: -SELECT ch, staattnum, stainherit, stanullfrac, stawidth, stadistinct, stakind1, stakind2, stakind3, stakind4, stakind5, staop1, staop2, staop3, staop4, staop5, -stanumbers1, stanumbers2, stanumbers3, stanumbers4, stanumbers5, stavalues1, stavalues2, stavalues3, stavalues4, stavalues5 -FROM pg_statistic st, show_chunks('disttable') ch -WHERE st.starelid = ch -ORDER BY ch, staattnum -NOTICE: [db_chunk_api_2]: -ch |staattnum|stainherit|stanullfrac|stawidth|stadistinct|stakind1|stakind2|stakind3|stakind4|stakind5|staop1|staop2|staop3|staop4|staop5|stanumbers1|stanumbers2|stanumbers3|stanumbers4|stanumbers5|stavalues1|stavalues2|stavalues3|stavalues4|stavalues5 --------------------------------------------+---------+----------+-----------+--------+-----------+--------+--------+--------+--------+--------+------+------+------+------+------+-----------+-----------+-----------+-----------+-----------+----------+----------+----------+----------+---------- -_timescaledb_internal._dist_hyper_2_4_chunk| 1|f | 0| 8| -1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| | | | | | | | | | -_timescaledb_internal._dist_hyper_2_4_chunk| 2|f | 0| 4| -1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| | | | | | | | | | -_timescaledb_internal._dist_hyper_2_4_chunk| 3|f | 0| 8| -1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| | | | | | | | | | -_timescaledb_internal._dist_hyper_2_4_chunk| 4|f | 1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| | | | | | | | | | -_timescaledb_internal._dist_hyper_2_5_chunk| 1|f | 0| 8| -1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| | | | | | | | | | -_timescaledb_internal._dist_hyper_2_5_chunk| 2|f | 0| 4| -1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| | | | | | | | | | -_timescaledb_internal._dist_hyper_2_5_chunk| 3|f | 0| 8| -1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| | | | | | | | | | -_timescaledb_internal._dist_hyper_2_5_chunk| 4|f | 0| 6| -1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| | | | | | | | | | -(8 rows) - - - remote_exec -------------- - -(1 row) - --- Clean up -RESET ROLE; -TRUNCATE disttable; -SELECT * FROM delete_data_node(:'DATA_NODE_1', force => true); -WARNING: insufficient number of data nodes for distributed hypertable "disttable" -NOTICE: the number of partitions in dimension "device" of hypertable "disttable" was decreased to 1 - delete_data_node ------------------- - t -(1 row) - -SELECT * FROM delete_data_node(:'DATA_NODE_2', force => true); -WARNING: insufficient number of data nodes for distributed hypertable "disttable" - delete_data_node ------------------- - t -(1 row) - -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE 
:DATA_NODE_2 WITH (FORCE); -- Test create_chunk_table to recreate the chunk table and show dimension slices SET ROLE :ROLE_DEFAULT_PERM_USER; SELECT * FROM chunkapi ORDER BY time; @@ -637,7 +325,7 @@ SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2); NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- - 3 | public | chunkapi | t + 2 | public | chunkapi | t (1 row) SELECT count(*) FROM @@ -654,7 +342,7 @@ CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2, '3d'); hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- - 4 | public | chunkapi | t + 3 | public | chunkapi | t (1 row) SELECT count(*) FROM @@ -670,7 +358,7 @@ CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 3); hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- - 5 | public | chunkapi | t + 4 | public | chunkapi | t (1 row) SELECT count(*) FROM @@ -687,7 +375,7 @@ CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 3); hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- - 6 | public | chunkapi | t + 5 | public | chunkapi | t (1 row) INSERT INTO chunkapi VALUES ('2018-01-01 05:00:00-8', 1, 23.4); @@ -701,7 +389,7 @@ CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2); hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- - 7 | public | chunkapi | t + 6 | public | chunkapi | t (1 row) INSERT INTO chunkapi VALUES ('2018-01-01 05:00:00-8', 2, 23.4); @@ -718,7 +406,7 @@ CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2); hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- - 8 | public | chunkapi | t + 7 | public | chunkapi | t (1 row) INSERT INTO chunkapi VALUES ('2018-02-01 05:00:00-8', 1, 23.4); @@ -744,7 +432,7 @@ CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 3); hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- - 9 | public | chunkapi | t + 8 | public | chunkapi | t (1 row) SELECT attach_tablespace('tablespace1', 'chunkapi'); @@ -781,7 +469,7 @@ CREATE TABLE chunkapi (time timestamptz NOT NULL PRIMARY KEY, device int REFEREN SELECT * FROM create_hypertable('chunkapi', 'time'); hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- - 10 | public | chunkapi | t + 9 | public | chunkapi | t (1 row) INSERT INTO chunkapi VALUES ('2018-01-01 05:00:00-8', 1, 23.4); @@ -828,43 +516,43 @@ SELECT count(*) FROM SELECT tablespace FROM pg_tables WHERE tablename = :'CHUNK_NAME'; tablespace ------------- - tablespace1 + tablespace2 (1 row) -- Now create the complete chunk from the chunk table SELECT _timescaledb_functions.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME', format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass); - 
create_chunk ---------------------------------------------------------------------------------------------------------- - (11,10,_timescaledb_internal,_hyper_10_10_chunk,r,"{""time"": [1514419200000000, 1515024000000000]}",t) + create_chunk +----------------------------------------------------------------------------------------------------- + (8,9,_timescaledb_internal,_hyper_9_7_chunk,r,"{""time"": [1514419200000000, 1515024000000000]}",t) (1 row) -- Compare original and new constraints SELECT * FROM original_chunk_constraints; - Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated ----------------------------+------+----------+--------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+----------- - 10_1_chunkapi_device_fkey | f | {device} | devices_pkey | | f | f | t - 10_2_chunkapi_pkey | p | {time} | _timescaledb_internal."10_2_chunkapi_pkey" | | f | f | t - chunkapi_temp_check | c | {temp} | - | (temp > (0)::double precision) | f | f | t - constraint_15 | c | {time} | - | (("time" >= 'Wed Dec 27 16:00:00 2017 PST'::timestamp with time zone) AND ("time" < 'Wed Jan 03 16:00:00 2018 PST'::timestamp with time zone)) | f | f | t + Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated +--------------------------+------+----------+-------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+----------- + 7_1_chunkapi_device_fkey | f | {device} | devices_pkey | | f | f | t + 7_2_chunkapi_pkey | p | {time} | _timescaledb_internal."7_2_chunkapi_pkey" | | f | f | t + chunkapi_temp_check | c | {temp} | - | (temp > (0)::double precision) | f | f | t + constraint_11 | c | {time} | - | (("time" >= 'Wed Dec 27 16:00:00 2017 PST'::timestamp with time zone) AND ("time" < 'Wed Jan 03 16:00:00 2018 PST'::timestamp with time zone)) | f | f | t (4 rows) SELECT * FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass); - Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated ----------------------------+------+----------+--------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+----------- - 11_3_chunkapi_device_fkey | f | {device} | devices_pkey | | f | f | t - 11_4_chunkapi_pkey | p | {time} | _timescaledb_internal."11_4_chunkapi_pkey" | | f | f | t - chunkapi_temp_check | c | {temp} | - | (temp > (0)::double precision) | f | f | t - constraint_16 | c | {time} | - | (("time" >= 'Wed Dec 27 16:00:00 2017 PST'::timestamp with time zone) AND ("time" < 'Wed Jan 03 16:00:00 2018 PST'::timestamp with time zone)) | f | f | t + Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated +--------------------------+------+----------+-------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+----------- + 8_3_chunkapi_device_fkey | f | {device} | devices_pkey | | f | f | t + 8_4_chunkapi_pkey | p | {time} | _timescaledb_internal."8_4_chunkapi_pkey" | | f | f | t + chunkapi_temp_check | c 
| {temp} | - | (temp > (0)::double precision) | f | f | t + constraint_12 | c | {time} | - | (("time" >= 'Wed Dec 27 16:00:00 2017 PST'::timestamp with time zone) AND ("time" < 'Wed Jan 03 16:00:00 2018 PST'::timestamp with time zone)) | f | f | t (4 rows) -- Compare original and new chunk constraints metadata SELECT * FROM original_chunk_constraints_metadata; - chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name -----------+--------------------+---------------------------+---------------------------- - 10 | | 10_1_chunkapi_device_fkey | chunkapi_device_fkey - 10 | | 10_2_chunkapi_pkey | chunkapi_pkey - 10 | 15 | constraint_15 | + chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----------+--------------------+--------------------------+---------------------------- + 7 | | 7_1_chunkapi_device_fkey | chunkapi_device_fkey + 7 | | 7_2_chunkapi_pkey | chunkapi_pkey + 7 | 11 | constraint_11 | (3 rows) SELECT @@ -875,11 +563,11 @@ SELECT FROM _timescaledb_catalog.chunk_constraint con INNER JOIN _timescaledb_catalog.chunk ch ON (con.chunk_id = ch.id) WHERE ch.schema_name = :'CHUNK_SCHEMA' AND ch.table_name = :'CHUNK_NAME'; - chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name -----------+--------------------+---------------------------+---------------------------- - 11 | | 11_3_chunkapi_device_fkey | chunkapi_device_fkey - 11 | | 11_4_chunkapi_pkey | chunkapi_pkey - 11 | 16 | constraint_16 | + chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----------+--------------------+--------------------------+---------------------------- + 8 | | 8_3_chunkapi_device_fkey | chunkapi_device_fkey + 8 | | 8_4_chunkapi_pkey | chunkapi_pkey + 8 | 12 | constraint_12 | (3 rows) DROP TABLE original_chunk_constraints; @@ -899,9 +587,9 @@ SELECT :'CHUNK_NAME' AS expected_table_name, (_timescaledb_functions.show_chunk(ch)).* FROM show_chunks('chunkapi') ch; - expected_schema | expected_table_name | chunk_id | hypertable_id | schema_name | table_name | relkind | slices ------------------------+---------------------+----------+---------------+-----------------------+--------------------+---------+------------------------------------------------ - _timescaledb_internal | _hyper_10_10_chunk | 11 | 10 | _timescaledb_internal | _hyper_10_10_chunk | r | {"time": [1514419200000000, 1515024000000000]} + expected_schema | expected_table_name | chunk_id | hypertable_id | schema_name | table_name | relkind | slices +-----------------------+---------------------+----------+---------------+-----------------------+------------------+---------+------------------------------------------------ + _timescaledb_internal | _hyper_9_7_chunk | 8 | 9 | _timescaledb_internal | _hyper_9_7_chunk | r | {"time": [1514419200000000, 1515024000000000]} (1 row) DROP TABLE chunkapi; @@ -914,7 +602,7 @@ CREATE TABLE chunkapi (time timestamptz NOT NULL PRIMARY KEY, device int REFEREN SELECT * FROM create_hypertable('chunkapi', 'time'); hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- - 11 | public | chunkapi | t + 10 | public | chunkapi | t (1 row) CREATE TABLE newchunk (time timestamptz NOT NULL, device int, temp float); @@ -939,9 +627,9 @@ ALTER TABLE newchunk ADD CONSTRAINT chunkapi_temp_check CHECK (temp > 0); CREATE TABLE newchunk2 as select * from newchunk; ALTER TABLE newchunk2 ADD CONSTRAINT chunkapi_temp_check CHECK (temp > 0); SELECT * FROM 
_timescaledb_functions.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME', 'newchunk'); - chunk_id | hypertable_id | schema_name | table_name | relkind | slices | created -----------+---------------+-----------------------+--------------------+---------+------------------------------------------------+--------- - 13 | 11 | _timescaledb_internal | _hyper_10_10_chunk | r | {"time": [1514419200000000, 1515024000000000]} | t + chunk_id | hypertable_id | schema_name | table_name | relkind | slices | created +----------+---------------+-----------------------+------------------+---------+------------------------------------------------+--------- + 10 | 10 | _timescaledb_internal | _hyper_9_7_chunk | r | {"time": [1514419200000000, 1515024000000000]} | t (1 row) -- adding an existing table to an exiting range must fail @@ -955,9 +643,9 @@ SELECT :'CHUNK_NAME' AS expected_table_name, (_timescaledb_functions.show_chunk(ch)).* FROM show_chunks('chunkapi') ch; - expected_schema | expected_table_name | chunk_id | hypertable_id | schema_name | table_name | relkind | slices ------------------------+---------------------+----------+---------------+-----------------------+--------------------+---------+------------------------------------------------ - _timescaledb_internal | _hyper_10_10_chunk | 13 | 11 | _timescaledb_internal | _hyper_10_10_chunk | r | {"time": [1514419200000000, 1515024000000000]} + expected_schema | expected_table_name | chunk_id | hypertable_id | schema_name | table_name | relkind | slices +-----------------------+---------------------+----------+---------------+-----------------------+------------------+---------+------------------------------------------------ + _timescaledb_internal | _hyper_9_7_chunk | 10 | 10 | _timescaledb_internal | _hyper_9_7_chunk | r | {"time": [1514419200000000, 1515024000000000]} (1 row) -- The chunk should inherit the hypertable @@ -983,16 +671,10 @@ SELECT * FROM chunkapi ORDER BY 1,2,3; SELECT * FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass); Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated ---------------------------+------+----------+--------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+----------- - 13_7_chunkapi_device_fkey | f | {device} | devices_pkey | | f | f | t - 13_8_chunkapi_pkey | p | {time} | _timescaledb_internal."13_8_chunkapi_pkey" | | f | f | t + 10_7_chunkapi_device_fkey | f | {device} | devices_pkey | | f | f | t + 10_8_chunkapi_pkey | p | {time} | _timescaledb_internal."10_8_chunkapi_pkey" | | f | f | t chunkapi_temp_check | c | {temp} | - | (temp > (0)::double precision) | f | f | t - constraint_18 | c | {time} | - | (("time" >= 'Wed Dec 27 16:00:00 2017 PST'::timestamp with time zone) AND ("time" < 'Wed Jan 03 16:00:00 2018 PST'::timestamp with time zone)) | f | f | t + constraint_14 | c | {time} | - | (("time" >= 'Wed Dec 27 16:00:00 2017 PST'::timestamp with time zone) AND ("time" < 'Wed Jan 03 16:00:00 2018 PST'::timestamp with time zone)) | f | f | t (4 rows) DROP TABLE chunkapi; -\c :TEST_DBNAME :ROLE_SUPERUSER -SET client_min_messages = ERROR; -DROP TABLESPACE tablespace1; -DROP TABLESPACE tablespace2; -SET client_min_messages = NOTICE; -\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER diff --git a/tsl/test/expected/chunk_utils_internal.out b/tsl/test/expected/chunk_utils_internal.out index 
cec4cac6465..616ddf54696 100644 --- a/tsl/test/expected/chunk_utils_internal.out +++ b/tsl/test/expected/chunk_utils_internal.out @@ -803,44 +803,6 @@ ORDER BY table_name; ------------+--------+----------- (0 rows) --- TEST error try freeze/unfreeze on dist hypertable --- Add distributed hypertables -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -\c :TEST_DBNAME :ROLE_SUPERUSER -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2')) v(name) -) a; - node_name | database | node_created | database_created | extension_created ----------------------------+---------------------------+--------------+------------------+------------------- - db_chunk_utils_internal_1 | db_chunk_utils_internal_1 | t | t | t - db_chunk_utils_internal_2 | db_chunk_utils_internal_2 | t | t | t -(2 rows) - -CREATE TABLE disthyper (timec timestamp, device integer); -SELECT create_distributed_hypertable('disthyper', 'timec', 'device'); -WARNING: column type "timestamp without time zone" used for "timec" does not follow best practices -NOTICE: adding not-null constraint to column "timec" - create_distributed_hypertable -------------------------------- - (6,public,disthyper,t) -(1 row) - -INSERT into disthyper VALUES ('2020-01-01', 10); ---freeze one of the chunks -SELECT chunk_schema || '.' || chunk_name as "CHNAME3" -FROM timescaledb_information.chunks -WHERE hypertable_name = 'disthyper' -ORDER BY chunk_name LIMIT 1 -\gset -\set ON_ERROR_STOP 0 -SELECT _timescaledb_functions.freeze_chunk( :'CHNAME3'); -ERROR: operation not supported on distributed chunk or foreign table "_dist_hyper_6_12_chunk" -SELECT _timescaledb_functions.unfreeze_chunk( :'CHNAME3'); -ERROR: operation not supported on distributed chunk or foreign table "_dist_hyper_6_12_chunk" -\set ON_ERROR_STOP 1 -- TEST can create OSM chunk if there are constraints on the hypertable \c :TEST_DBNAME :ROLE_4 CREATE TABLE measure( id integer PRIMARY KEY, mname varchar(10)); @@ -853,7 +815,7 @@ CREATE TABLE hyper_constr ( id integer, time bigint, temp float, mid integer SELECT create_hypertable('hyper_constr', 'time', chunk_time_interval => 10); create_hypertable --------------------------- - (7,public,hyper_constr,t) + (6,public,hyper_constr,t) (1 row) INSERT INTO hyper_constr VALUES( 10, 200, 22, 1); @@ -886,7 +848,7 @@ WHERE hypertable_id IN (SELECT id from _timescaledb_catalog.hypertable ORDER BY table_name; table_name | status | osm_chunk --------------------+--------+----------- - _hyper_7_13_chunk | 0 | f + _hyper_6_12_chunk | 0 | f child_hyper_constr | 0 | t (2 rows) @@ -935,8 +897,8 @@ where hypertable_id = (Select id from _timescaledb_catalog.hypertable where tabl ORDER BY id; id | table_name ----+-------------------- - 13 | _hyper_7_13_chunk - 14 | child_hyper_constr + 12 | _hyper_6_12_chunk + 13 | child_hyper_constr (2 rows) ROLLBACK; @@ -968,7 +930,7 @@ CREATE TABLE test1.copy_test ( SELECT create_hypertable('test1.copy_test', 'time', chunk_time_interval => interval '1 day'); create_hypertable ----------------------- - (8,test1,copy_test,t) + (7,test1,copy_test,t) (1 row) COPY test1.copy_test FROM STDIN DELIMITER ','; @@ -989,13 +951,13 @@ SELECT table_name, status FROM _timescaledb_catalog.chunk WHERE table_name = :'COPY_CHUNK_NAME'; table_name | status -------------------+-------- - _hyper_8_15_chunk | 4 + _hyper_7_14_chunk | 4 (1 row) \set ON_ERROR_STOP 0 -- Copy should fail 
because one of che chunks is frozen COPY test1.copy_test FROM STDIN DELIMITER ','; -ERROR: cannot INSERT into frozen chunk "_hyper_8_15_chunk" +ERROR: cannot INSERT into frozen chunk "_hyper_7_14_chunk" \set ON_ERROR_STOP 1 -- Count existing rows SELECT COUNT(*) FROM test1.copy_test; @@ -1009,13 +971,13 @@ SELECT table_name, status FROM _timescaledb_catalog.chunk WHERE table_name = :'COPY_CHUNK_NAME'; table_name | status -------------------+-------- - _hyper_8_15_chunk | 4 + _hyper_7_14_chunk | 4 (1 row) \set ON_ERROR_STOP 0 -- Copy should fail because one of che chunks is frozen COPY test1.copy_test FROM STDIN DELIMITER ','; -ERROR: cannot INSERT into frozen chunk "_hyper_8_15_chunk" +ERROR: cannot INSERT into frozen chunk "_hyper_7_14_chunk" \set ON_ERROR_STOP 1 -- Count existing rows SELECT COUNT(*) FROM test1.copy_test; @@ -1036,7 +998,7 @@ SELECT table_name, status FROM _timescaledb_catalog.chunk WHERE table_name = :'COPY_CHUNK_NAME'; table_name | status -------------------+-------- - _hyper_8_15_chunk | 0 + _hyper_7_14_chunk | 0 (1 row) -- Copy should work now @@ -1131,12 +1093,12 @@ WHERE ht.table_name LIKE 'osm%' ORDER BY 2,3; table_name | id | dimension_id | range_start | range_end ------------+----+--------------+---------------------+--------------------- - osm_int2 | 17 | 9 | 9223372036854775806 | 9223372036854775807 - osm_int4 | 18 | 10 | 9223372036854775806 | 9223372036854775807 - osm_int8 | 19 | 11 | 9223372036854775806 | 9223372036854775807 - osm_date | 20 | 12 | 9223372036854775806 | 9223372036854775807 - osm_ts | 21 | 13 | 9223372036854775806 | 9223372036854775807 - osm_tstz | 22 | 14 | 9223372036854775806 | 9223372036854775807 + osm_int2 | 15 | 7 | 9223372036854775806 | 9223372036854775807 + osm_int4 | 16 | 8 | 9223372036854775806 | 9223372036854775807 + osm_int8 | 17 | 9 | 9223372036854775806 | 9223372036854775807 + osm_date | 18 | 10 | 9223372036854775806 | 9223372036854775807 + osm_ts | 19 | 11 | 9223372036854775806 | 9223372036854775807 + osm_tstz | 20 | 12 | 9223372036854775806 | 9223372036854775807 (6 rows) -- test that correct slice is found and updated for table with multiple chunk constraints @@ -1149,8 +1111,8 @@ _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc WHERE c.h AND c.id = cc.chunk_id; id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk | chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name ----+---------------+-----------------------+--------------------+---------------------+---------+--------+-----------+----------+--------------------+-----------------------------+---------------------------- - 23 | 15 | _timescaledb_internal | _hyper_15_23_chunk | | f | 0 | f | 23 | | 23_3_test_multicon_time_key | test_multicon_time_key - 23 | 15 | _timescaledb_internal | _hyper_15_23_chunk | | f | 0 | f | 23 | 23 | constraint_23 | + 22 | 14 | _timescaledb_internal | _hyper_14_22_chunk | | f | 0 | f | 22 | | 22_3_test_multicon_time_key | test_multicon_time_key + 22 | 14 | _timescaledb_internal | _hyper_14_22_chunk | | f | 0 | f | 22 | 21 | constraint_21 | (2 rows) \c :TEST_DBNAME :ROLE_SUPERUSER ; @@ -1168,7 +1130,7 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end 
----------+--------------------+--------+-----------+--------------------+------------------+------------------ - 23 | _hyper_15_23_chunk | 0 | t | 23 | 1577955600000000 | 1578128400000000 + 22 | _hyper_14_22_chunk | 0 | t | 21 | 1577955600000000 | 1578128400000000 (1 row) -- check that range was reset to default - infinity @@ -1196,7 +1158,7 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id; chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end ----------+--------------------+--------+-----------+--------------------+---------------------+--------------------- - 23 | _hyper_15_23_chunk | 0 | t | 23 | 9223372036854775806 | 9223372036854775807 + 22 | _hyper_14_22_chunk | 0 | t | 21 | 9223372036854775806 | 9223372036854775807 (1 row) -- test further with ordered append @@ -1220,9 +1182,9 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id; chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end ----------+-------------------------+--------+-----------+--------------------+---------------------+--------------------- - 24 | _hyper_16_24_chunk | 0 | f | 24 | 1577836800000000 | 1577923200000000 - 25 | _hyper_16_25_chunk | 0 | f | 25 | 1577923200000000 | 1578009600000000 - 26 | test_chunkapp_fdw_child | 0 | t | 26 | 9223372036854775806 | 9223372036854775807 + 23 | _hyper_15_23_chunk | 0 | f | 22 | 1577836800000000 | 1577923200000000 + 24 | _hyper_15_24_chunk | 0 | f | 23 | 1577923200000000 | 1578009600000000 + 25 | test_chunkapp_fdw_child | 0 | t | 24 | 9223372036854775806 | 9223372036854775807 (3 rows) -- attempt to update overlapping range, should fail @@ -1243,9 +1205,9 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id; chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end ----------+-------------------------+--------+-----------+--------------------+------------------+------------------ - 24 | _hyper_16_24_chunk | 0 | f | 24 | 1577836800000000 | 1577923200000000 - 25 | _hyper_16_25_chunk | 0 | f | 25 | 1577923200000000 | 1578009600000000 - 26 | test_chunkapp_fdw_child | 0 | t | 26 | 1578038400000000 | 1578124800000000 + 23 | _hyper_15_23_chunk | 0 | f | 22 | 1577836800000000 | 1577923200000000 + 24 | _hyper_15_24_chunk | 0 | f | 23 | 1577923200000000 | 1578009600000000 + 25 | test_chunkapp_fdw_child | 0 | t | 24 | 1578038400000000 | 1578124800000000 (3 rows) -- ordered append should be possible as ranges do not overlap @@ -1254,8 +1216,8 @@ EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; ---------------------------------------------------------------------------------------------------------------------------------------- Custom Scan (ChunkAppend) on test_chunkapp (cost=0.15..270.31 rows=6355 width=12) Order: test_chunkapp."time" - -> Index Scan Backward using _hyper_16_24_chunk_test_chunkapp_time_idx on _hyper_16_24_chunk (cost=0.15..42.75 rows=2040 width=12) - -> Index Scan Backward using _hyper_16_25_chunk_test_chunkapp_time_idx on _hyper_16_25_chunk (cost=0.15..42.75 rows=2040 width=12) + -> Index Scan Backward using _hyper_15_23_chunk_test_chunkapp_time_idx on 
_hyper_15_23_chunk (cost=0.15..42.75 rows=2040 width=12) + -> Index Scan Backward using _hyper_15_24_chunk_test_chunkapp_time_idx on _hyper_15_24_chunk (cost=0.15..42.75 rows=2040 width=12) -> Foreign Scan on test_chunkapp_fdw_child (cost=100.00..184.80 rows=2275 width=12) (5 rows) @@ -1296,9 +1258,9 @@ EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------- Merge Append (cost=100.33..352.47 rows=6355 width=12) - Sort Key: _hyper_16_24_chunk."time" - -> Index Scan Backward using _hyper_16_24_chunk_test_chunkapp_time_idx on _hyper_16_24_chunk (cost=0.15..42.75 rows=2040 width=12) - -> Index Scan Backward using _hyper_16_25_chunk_test_chunkapp_time_idx on _hyper_16_25_chunk (cost=0.15..42.75 rows=2040 width=12) + Sort Key: _hyper_15_23_chunk."time" + -> Index Scan Backward using _hyper_15_23_chunk_test_chunkapp_time_idx on _hyper_15_23_chunk (cost=0.15..42.75 rows=2040 width=12) + -> Index Scan Backward using _hyper_15_24_chunk_test_chunkapp_time_idx on _hyper_15_24_chunk (cost=0.15..42.75 rows=2040 width=12) -> Foreign Scan on test_chunkapp_fdw_child (cost=100.00..184.80 rows=2275 width=12) (5 rows) @@ -1315,9 +1277,9 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id; chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end ----------+-------------------------+--------+-----------+--------------------+---------------------+--------------------- - 24 | _hyper_16_24_chunk | 0 | f | 24 | 1577836800000000 | 1577923200000000 - 25 | _hyper_16_25_chunk | 0 | f | 25 | 1577923200000000 | 1578009600000000 - 26 | test_chunkapp_fdw_child | 0 | t | 26 | 9223372036854775806 | 9223372036854775807 + 23 | _hyper_15_23_chunk | 0 | f | 22 | 1577836800000000 | 1577923200000000 + 24 | _hyper_15_24_chunk | 0 | f | 23 | 1577923200000000 | 1578009600000000 + 25 | test_chunkapp_fdw_child | 0 | t | 24 | 9223372036854775806 | 9223372036854775807 (3 rows) -- now set empty to true, should ordered append @@ -1335,8 +1297,8 @@ EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; ---------------------------------------------------------------------------------------------------------------------------------------- Custom Scan (ChunkAppend) on test_chunkapp (cost=0.15..270.31 rows=6355 width=12) Order: test_chunkapp."time" - -> Index Scan Backward using _hyper_16_24_chunk_test_chunkapp_time_idx on _hyper_16_24_chunk (cost=0.15..42.75 rows=2040 width=12) - -> Index Scan Backward using _hyper_16_25_chunk_test_chunkapp_time_idx on _hyper_16_25_chunk (cost=0.15..42.75 rows=2040 width=12) + -> Index Scan Backward using _hyper_15_23_chunk_test_chunkapp_time_idx on _hyper_15_23_chunk (cost=0.15..42.75 rows=2040 width=12) + -> Index Scan Backward using _hyper_15_24_chunk_test_chunkapp_time_idx on _hyper_15_24_chunk (cost=0.15..42.75 rows=2040 width=12) -> Foreign Scan on test_chunkapp_fdw_child (cost=100.00..184.80 rows=2275 width=12) (5 rows) @@ -1352,7 +1314,7 @@ CREATE TABLE test2(time timestamptz not null, a int); SELECT create_hypertable('test2', 'time'); create_hypertable --------------------- - (17,public,test2,t) + (16,public,test2,t) (1 row) INSERT INTO test2 VALUES ('2020-01-01'::timestamptz, 1); @@ -1360,7 +1322,7 @@ ALTER TABLE test2 SET (timescaledb.compress); SELECT compress_chunk(show_chunks('test2')); 
compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_17_27_chunk + _timescaledb_internal._hyper_16_26_chunk (1 row) -- find internal compression table, call API function on it @@ -1369,7 +1331,7 @@ FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.hypertable cht WHERE ht.table_name = 'test2' and cht.id = ht.compressed_hypertable_id \gset \set ON_ERROR_STOP 0 SELECT _timescaledb_functions.hypertable_osm_range_update(:'COMPRESSION_TBLNM'::regclass, '2020-01-01'::timestamptz); -ERROR: could not find time dimension for hypertable _timescaledb_internal._compressed_hypertable_18 +ERROR: could not find time dimension for hypertable _timescaledb_internal._compressed_hypertable_17 \set ON_ERROR_STOP 1 -- test wrong/incompatible data types with hypertable time dimension -- update range of int2 with int4 @@ -1422,5 +1384,3 @@ ERROR: Cannot insert into tiered chunk range of public.osm_slice_update - attem -- clean up databases created \c :TEST_DBNAME :ROLE_SUPERUSER DROP DATABASE postgres_fdw_db WITH (FORCE); -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); diff --git a/tsl/test/expected/data_fetcher.out b/tsl/test/expected/data_fetcher.out index 3f61b25eec1..2c66d7b8b33 100644 --- a/tsl/test/expected/data_fetcher.out +++ b/tsl/test/expected/data_fetcher.out @@ -17,6 +17,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------------+-------------------+--------------+------------------+------------------- db_data_fetcher_1 | db_data_fetcher_1 | t | t | t @@ -26,6 +29,7 @@ FROM ( CREATE TABLE disttable(time timestamptz NOT NULL, device int, temp float); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 1 | public | disttable | t @@ -45,6 +49,7 @@ FROM generate_series('2019-01-01'::timestamptz, '2019-01-02'::timestamptz, '1 se -- copy batch is the file trailer (#5323). CREATE table one_batch(ts timestamptz NOT NULL, sensor_id int NOT NULL, value float NOT NULL); SELECT create_distributed_hypertable('one_batch', 'ts'); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (2,public,one_batch,t) @@ -54,6 +59,7 @@ INSERT INTO one_batch SELECT '2023-01-01'::timestamptz AS time, sensor_id, rando -- Same but for the DEFAULT_FDW_FETCH_SIZE (10000) CREATE table one_batch_default(ts timestamptz NOT NULL, sensor_id int NOT NULL, value float NOT NULL); SELECT create_distributed_hypertable('one_batch_default', 'ts'); +WARNING: distributed hypertable is deprecated create_distributed_hypertable -------------------------------- (3,public,one_batch_default,t) diff --git a/tsl/test/expected/data_node.out b/tsl/test/expected/data_node.out index e2f4f2a271b..10d92780744 100644 --- a/tsl/test/expected/data_node.out +++ b/tsl/test/expected/data_node.out @@ -21,12 +21,14 @@ ORDER BY dimension_id, range_start; GRANT SELECT ON hypertable_partitions TO :ROLE_1; -- Add data nodes using TimescaleDB data_node management API. 
SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_1 | db_data_node_1 | t | t | t (1 row) SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_2', 'localhost', database => :'DN_DBNAME_2'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_2 | db_data_node_2 | t | t | t @@ -35,6 +37,7 @@ SELECT node_name, database, node_created, database_created, extension_created FR \set ON_ERROR_STOP 0 -- Add again SELECT * FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); +WARNING: adding data node is deprecated ERROR: server "data_node_2" already exists -- No host provided SELECT * FROM add_data_node('data_node_99'); @@ -43,21 +46,26 @@ SELECT * FROM add_data_node(NULL); ERROR: function add_data_node(unknown) does not exist at character 15 -- Add NULL data_node SELECT * FROM add_data_node(NULL, host => 'localhost'); +WARNING: adding data node is deprecated ERROR: data node name cannot be NULL SELECT * FROM add_data_node(NULL, NULL); +WARNING: adding data node is deprecated ERROR: a host needs to be specified -- Test invalid port numbers SELECT * FROM add_data_node('data_node_3', 'localhost', port => 65536, database => :'DN_DBNAME_3'); +WARNING: adding data node is deprecated ERROR: invalid port number 65536 SELECT * FROM add_data_node('data_node_3', 'localhost', port => 0, database => :'DN_DBNAME_3'); +WARNING: adding data node is deprecated ERROR: invalid port number 0 SELECT * FROM add_data_node('data_node_3', 'localhost', port => -1, database => :'DN_DBNAME_3'); +WARNING: adding data node is deprecated ERROR: invalid port number -1 SELECT inet_server_port() as PGPORT \gset -- Adding a data node via ADD SERVER is blocked @@ -71,6 +79,7 @@ ERROR: operation not supported on a TimescaleDB data node -- Should not generate error with if_not_exists option SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2', if_not_exists => true); +WARNING: adding data node is deprecated NOTICE: data node "data_node_2" already exists, skipping node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- @@ -78,6 +87,7 @@ NOTICE: data node "data_node_2" already exists, skipping (1 row) SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_3', host => 'localhost', database => :'DN_DBNAME_3'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_3 | db_data_node_3 | t | t | t @@ -122,6 +132,7 @@ SELECT node_name FROM timescaledb_information.data_nodes ORDER BY node_name; (3 rows) SELECT * FROM delete_data_node('data_node_3'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -138,10 +149,12 @@ SELECT node_name FROM 
timescaledb_information.data_nodes ORDER BY node_name; \set ON_ERROR_STOP 0 -- Deleting a non-existing data node generates error SELECT * FROM delete_data_node('data_node_3'); +WARNING: deleting data node is deprecated ERROR: server "data_node_3" does not exist \set ON_ERROR_STOP 1 -- Deleting non-existing data node with "if_exists" set does not generate error SELECT * FROM delete_data_node('data_node_3', if_exists => true); +WARNING: deleting data node is deprecated NOTICE: data node "data_node_3" does not exist, skipping delete_data_node ------------------ @@ -156,12 +169,14 @@ SELECT node_name FROM timescaledb_information.data_nodes ORDER BY node_name; (2 rows) SELECT * FROM delete_data_node('data_node_1'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t (1 row) SELECT * FROM delete_data_node('data_node_2'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -186,18 +201,21 @@ DROP DATABASE :DN_DBNAME_2 WITH (FORCE); DROP DATABASE :DN_DBNAME_3 WITH (FORCE); SET client_min_messages TO INFO; SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_1 | db_data_node_1 | t | t | t (1 row) SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_2 | db_data_node_2 | t | t | t (1 row) SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_3', host => 'localhost', database => :'DN_DBNAME_3'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_3 | db_data_node_3 | t | t | t @@ -212,6 +230,8 @@ CREATE TABLE disttable(time timestamptz, device int, temp float); \set ON_ERROR_STOP 0 \set VERBOSITY default SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device'); +WARNING: distributed hypertable is deprecated +DETAIL: Multi-node is deprecated and will be removed in future releases. NOTICE: 3 of 3 data nodes not used by this hypertable due to lack of permissions HINT: Grant USAGE on data nodes to attach them to a hypertable. ERROR: no data nodes can be assigned to the hypertable @@ -258,6 +278,7 @@ SET ROLE :ROLE_1; -- slices in the device dimension equals the number of data nodes. 
BEGIN; SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device'); +WARNING: distributed hypertable is deprecated NOTICE: 1 of 3 data nodes not used by this hypertable due to lack of permissions NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created @@ -297,6 +318,7 @@ ROLLBACK; \set ON_ERROR_STOP 0 SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', data_nodes => '{ data_node_1, data_node_2, data_node_3 }'); +WARNING: distributed hypertable is deprecated ERROR: permission denied for foreign server data_node_3 \set ON_ERROR_STOP 1 RESET ROLE; @@ -310,6 +332,7 @@ SET ROLE :ROLE_1; -- Now specify less slices than there are data nodes to generate a -- warning SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 2); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" WARNING: insufficient number of partitions for dimension "device" hypertable_id | schema_name | table_name | created @@ -337,6 +360,7 @@ SELECT replication_factor FROM _timescaledb_catalog.hypertable WHERE table_name SELECT * FROM test.remote_exec(NULL, $$ SELECT replication_factor FROM _timescaledb_catalog.hypertable WHERE table_name = 'disttable'; $$); +WARNING: executing remote command is deprecated NOTICE: [data_node_1]: SELECT replication_factor FROM _timescaledb_catalog.hypertable WHERE table_name = 'disttable' NOTICE: [data_node_1]: @@ -402,25 +426,31 @@ CREATE TABLE disttable(time timestamptz, device int, temp float); \set ON_ERROR_STOP 0 -- Attach data node should fail when called on a non-hypertable SELECT * FROM attach_data_node('data_node_1', 'disttable'); +WARNING: attaching data node is deprecated ERROR: table "disttable" is not a hypertable -- Test some bad create_hypertable() parameter values for distributed hypertables -- Bad replication factor SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', replication_factor => 0, data_nodes => '{ "data_node_2", "data_node_4" }'); +WARNING: distributed hypertable is deprecated ERROR: invalid replication factor SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', replication_factor => 32768); +WARNING: distributed hypertable is deprecated ERROR: replication factor too large for hypertable "disttable" SELECT * FROM create_hypertable('disttable', 'time', replication_factor => -1); ERROR: invalid replication factor SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', replication_factor => -1); +WARNING: distributed hypertable is deprecated ERROR: invalid replication factor -- Non-existing data node SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', replication_factor => 2, data_nodes => '{ "data_node_4" }'); +WARNING: distributed hypertable is deprecated ERROR: server "data_node_4" does not exist \set ON_ERROR_STOP 1 -- Use a subset of data nodes and a replication factor of two so that -- each chunk is associated with more than one data node. 
Set -- number_partitions lower than number of servers to raise a warning SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', number_partitions => 1, replication_factor => 2, data_nodes => '{ "data_node_2", "data_node_3" }'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" WARNING: insufficient number of partitions for dimension "device" hypertable_id | schema_name | table_name | created @@ -536,6 +566,7 @@ SELECT * FROM _timescaledb_functions.set_chunk_default_data_node('_timescaledb_i -- Will fail because data_node_2 contains chunks SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT * FROM delete_data_node('data_node_2'); +WARNING: deleting data node is deprecated ERROR: data node "data_node_2" still holds data for distributed hypertable "disttable" -- non-existing chunk SELECT * FROM _timescaledb_functions.set_chunk_default_data_node('x_chunk', 'data_node_3'); @@ -565,6 +596,7 @@ SELECT * FROM hypertable_partitions; (1 row) SELECT * FROM delete_data_node('data_node_2', force => true); +WARNING: deleting data node is deprecated WARNING: distributed hypertable "disttable" is under-replicated WARNING: insufficient number of data nodes for distributed hypertable "disttable" delete_data_node @@ -617,12 +649,14 @@ SELECT * FROM _timescaledb_catalog.chunk_data_node; \set ON_ERROR_STOP 0 -- can't delete b/c it's last data replica SELECT * FROM delete_data_node('data_node_3', force => true); +WARNING: deleting data node is deprecated ERROR: insufficient number of data nodes \set ON_ERROR_STOP 1 -- Removing all data allows us to delete the data node by force, but -- with WARNING that new data will be under-replicated TRUNCATE disttable; SELECT * FROM delete_data_node('data_node_3', force => true); +WARNING: deleting data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "disttable" delete_data_node ------------------ @@ -652,6 +686,7 @@ SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, -- Attach data node should now succeed SET client_min_messages TO NOTICE; SELECT * FROM attach_data_node('data_node_1', 'disttable'); +WARNING: attaching data node is deprecated hypertable_id | node_hypertable_id | node_name ---------------+--------------------+------------- 3 | 3 | data_node_1 @@ -718,6 +753,7 @@ SELECT * FROM _timescaledb_functions.ping_data_node('pg_data_node_1'); ERROR: server "pg_data_node_1" does not exist -- ERROR on attaching to non-distributed hypertable SELECT * FROM attach_data_node('data_node_1', 'standalone'); +WARNING: attaching data node is deprecated ERROR: hypertable "standalone" is not distributed \set ON_ERROR_STOP 1 DROP TABLE standalone; @@ -725,24 +761,30 @@ DROP TABLE standalone; \set ON_ERROR_STOP 0 -- Invalid arguments SELECT * FROM attach_data_node('data_node_1', NULL, true); +WARNING: attaching data node is deprecated ERROR: hypertable cannot be NULL SELECT * FROM attach_data_node(NULL, 'disttable', true); +WARNING: attaching data node is deprecated ERROR: data node name cannot be NULL -- Deleted data node SELECT * FROM attach_data_node('data_node_2', 'disttable'); +WARNING: attaching data node is deprecated ERROR: server "data_node_2" does not exist -- Attaching to an already attached data node without 'if_not_exists' SELECT * FROM attach_data_node('data_node_1', 'disttable', false); +WARNING: attaching data node is deprecated ERROR: data node "data_node_1" is already attached to hypertable "disttable" -- Attaching a data node to another 
data node \c :DN_DBNAME_1 SELECT * FROM attach_data_node('data_node_4', 'disttable'); +WARNING: attaching data node is deprecated ERROR: hypertable "disttable" is not distributed \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; SET ROLE :ROLE_1; \set ON_ERROR_STOP 1 -- Attach if not exists SELECT * FROM attach_data_node('data_node_1', 'disttable', true); +WARNING: attaching data node is deprecated NOTICE: data node "data_node_1" is already attached to hypertable "disttable", skipping hypertable_id | node_hypertable_id | node_name ---------------+--------------------+------------- @@ -763,6 +805,7 @@ AND column_name = 'device'; SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_4', host => 'localhost', database => :'DN_DBNAME_4', if_not_exists => true); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_4 | db_data_node_4 | t | t | t @@ -775,6 +818,7 @@ GRANT USAGE -- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes GRANT CREATE ON SCHEMA public TO :ROLE_1; SELECT * FROM attach_data_node('data_node_4', 'disttable'); +WARNING: attaching data node is deprecated NOTICE: the number of partitions in dimension "device" was increased to 2 hypertable_id | node_hypertable_id | node_name ---------------+--------------------+------------- @@ -783,6 +827,7 @@ NOTICE: the number of partitions in dimension "device" was increased to 2 -- Recheck that ownership on data_node_4 is proper SELECT * FROM test.remote_exec(NULL, $$ SELECT tablename, tableowner from pg_catalog.pg_tables where tablename = 'disttable'; $$); +WARNING: executing remote command is deprecated NOTICE: [data_node_1]: SELECT tablename, tableowner from pg_catalog.pg_tables where tablename = 'disttable' NOTICE: [data_node_1]: tablename|tableowner @@ -817,6 +862,7 @@ AND column_name = 'device'; -- Clean up DROP TABLE disttable; SELECT * FROM delete_data_node('data_node_4'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -828,10 +874,12 @@ CREATE TABLE disttable(time timestamptz, device int, temp float); \set ON_ERROR_STOP 0 -- Creating a distributed hypertable without any data nodes should fail SELECT * FROM create_distributed_hypertable('disttable', 'time', data_nodes => '{ }'); +WARNING: distributed hypertable is deprecated ERROR: no data nodes can be assigned to the hypertable \set ON_ERROR_STOP 1 SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT * FROM delete_data_node('data_node_1'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -865,6 +913,7 @@ SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, \set ON_ERROR_STOP 0 -- No data nodes remain, so should fail SELECT * FROM create_distributed_hypertable('disttable', 'time'); +WARNING: distributed hypertable is deprecated ERROR: no data nodes can be assigned to the hypertable \set ON_ERROR_STOP 1 -- These data nodes have been deleted, so safe to remove their databases. 
@@ -880,18 +929,21 @@ SELECT node_name FROM timescaledb_information.data_nodes ORDER BY node_name; -- let's add some SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_1 | db_data_node_1 | t | t | t (1 row) SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_2 | db_data_node_2 | t | t | t (1 row) SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_3', host => 'localhost', database => :'DN_DBNAME_3'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_3 | db_data_node_3 | t | t | t @@ -906,6 +958,7 @@ CREATE TABLE disttable(time timestamptz, device int, temp float); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 2, replication_factor => 2, data_nodes => '{"data_node_1", "data_node_2", "data_node_3"}'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" WARNING: insufficient number of partitions for dimension "device" hypertable_id | schema_name | table_name | created @@ -963,6 +1016,7 @@ SELECT * FROM hypertable_partitions; -- Add additional hypertable CREATE TABLE disttable_2(time timestamptz, device int, temp float); SELECT * FROM create_distributed_hypertable('disttable_2', 'time', 'device', 2, replication_factor => 2, data_nodes => '{"data_node_1", "data_node_2", "data_node_3"}'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" WARNING: insufficient number of partitions for dimension "device" hypertable_id | schema_name | table_name | created @@ -1195,6 +1249,7 @@ ORDER BY table_name; -- Detach should work b/c disttable_2 has no data and more data nodes -- than replication factor SELECT * FROM detach_data_node('data_node_2', 'disttable_2'); +WARNING: detaching data node is deprecated detach_data_node ------------------ 1 @@ -1203,27 +1258,35 @@ SELECT * FROM detach_data_node('data_node_2', 'disttable_2'); \set ON_ERROR_STOP 0 -- can't detach non-existing data node SELECT * FROM detach_data_node('data_node_12345', 'disttable'); +WARNING: detaching data node is deprecated ERROR: server "data_node_12345" does not exist -- NULL data node SELECT * FROM detach_data_node(NULL, 'disttable'); +WARNING: detaching data node is deprecated ERROR: data node name cannot be NULL -- Can't detach data node_1 b/c it contains data for disttable SELECT * FROM detach_data_node('data_node_1'); +WARNING: detaching data node is deprecated ERROR: data node "data_node_1" still holds data for distributed hypertable "disttable" -- can't detach already detached data node SELECT * FROM detach_data_node('data_node_2', 'disttable_2'); +WARNING: detaching data node is deprecated ERROR: data node "data_node_2" is not attached to hypertable "disttable_2" 
SELECT * FROM detach_data_node('data_node_2', 'disttable_2', if_attached => false); +WARNING: detaching data node is deprecated ERROR: data node "data_node_2" is not attached to hypertable "disttable_2" -- can't detach b/c of replication factor for disttable_2 SELECT * FROM detach_data_node('data_node_3', 'disttable_2'); +WARNING: detaching data node is deprecated ERROR: insufficient number of data nodes for distributed hypertable "disttable_2" -- can't detach non hypertable SELECT * FROM detach_data_node('data_node_3', 'devices'); +WARNING: detaching data node is deprecated ERROR: table "devices" is not a hypertable \set ON_ERROR_STOP 1 -- do nothing if node is not attached SELECT * FROM detach_data_node('data_node_2', 'disttable_2', if_attached => true); +WARNING: detaching data node is deprecated NOTICE: data node "data_node_2" is not attached to hypertable "disttable_2", skipping detach_data_node ------------------ @@ -1241,6 +1304,7 @@ SELECT * FROM hypertable_partitions; (4 rows) SELECT * FROM detach_data_node('data_node_3', 'disttable_2', force => true); +WARNING: detaching data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "disttable_2" NOTICE: the number of partitions in dimension "device" of hypertable "disttable_2" was decreased to 1 detach_data_node @@ -1273,6 +1337,7 @@ ORDER BY foreign_table_name; -- force detach data node with data SELECT * FROM detach_data_node('data_node_3', 'disttable', force => true); +WARNING: detaching data node is deprecated WARNING: distributed hypertable "disttable" is under-replicated detach_data_node ------------------ @@ -1322,6 +1387,7 @@ ORDER BY foreign_table_name; \set ON_ERROR_STOP 0 -- detaching data node with last data replica should ERROR even when forcing SELECT * FROM detach_data_node('server_2', 'disttable', force => true); +WARNING: detaching data node is deprecated ERROR: server "server_2" does not exist \set ON_ERROR_STOP 1 -- drop all chunks @@ -1344,6 +1410,7 @@ ORDER BY foreign_table_name; (0 rows) SELECT * FROM detach_data_node('data_node_2', 'disttable', force => true); +WARNING: detaching data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "disttable" NOTICE: the number of partitions in dimension "device" of hypertable "disttable" was decreased to 1 detach_data_node @@ -1354,12 +1421,14 @@ NOTICE: the number of partitions in dimension "device" of hypertable "disttable -- Let's add more data nodes SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_4', host => 'localhost', database => :'DN_DBNAME_4'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_4 | db_data_node_4 | t | t | t (1 row) SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_5', host => 'localhost', database => :'DN_DBNAME_5'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_5 | db_data_node_5 | t | t | t @@ -1372,6 +1441,7 @@ GRANT CREATE ON SCHEMA public TO :ROLE_1; SET ROLE :ROLE_SUPERUSER; CREATE TABLE disttable_3(time timestamptz, device int, temp float); SELECT * FROM create_distributed_hypertable('disttable_3', 
'time', replication_factor => 1, data_nodes => '{"data_node_4", "data_node_5"}'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+-------------+--------- @@ -1381,6 +1451,7 @@ NOTICE: adding not-null constraint to column "time" SET ROLE :ROLE_1; CREATE TABLE disttable_4(time timestamptz, device int, temp float); SELECT * FROM create_distributed_hypertable('disttable_4', 'time', replication_factor => 1, data_nodes => '{"data_node_4", "data_node_5"}'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+-------------+--------- @@ -1390,6 +1461,7 @@ NOTICE: adding not-null constraint to column "time" \set ON_ERROR_STOP 0 -- error due to missing permissions SELECT * FROM detach_data_node('data_node_4', 'disttable_3'); +WARNING: detaching data node is deprecated ERROR: must be owner of hypertable "disttable_3" SELECT * FROM timescaledb_experimental.block_new_chunks('data_node_4', 'disttable_3'); ERROR: must be owner of hypertable "disttable_3" @@ -1402,6 +1474,7 @@ ERROR: must be owner of hypertable "disttable_3" -- non-existing hypertables. CREATE TABLE disttable_5(time timestamptz, device int, temp float); SELECT * FROM create_distributed_hypertable('disttable_5', 'time', replication_factor => 1, data_nodes => '{"data_node_4", "data_node_5" }'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+-------------+--------- @@ -1409,6 +1482,7 @@ NOTICE: adding not-null constraint to column "time" (1 row) SELECT * FROM test.remote_exec('{ data_node_4 }', $$ SELECT hypertable_name, owner FROM timescaledb_information.hypertables; $$); +WARNING: executing remote command is deprecated NOTICE: [data_node_4]: SELECT hypertable_name, owner FROM timescaledb_information.hypertables NOTICE: [data_node_4]: hypertable_name|owner @@ -1431,6 +1505,7 @@ SET ROLE :ROLE_1; -- test first detach without permission to drop the remote data \set ON_ERROR_STOP 0 SELECT * FROM detach_data_node('data_node_4', drop_remote_data => true); +WARNING: detaching data node is deprecated NOTICE: skipping hypertable "disttable_3" due to missing permissions ERROR: [data_node_4]: must be owner of table disttable_5 \set ON_ERROR_STOP 1 @@ -1439,6 +1514,7 @@ RESET ROLE; CALL distributed_exec(format('ALTER TABLE disttable_5 OWNER TO %s', :'ROLE_1'), '{ data_node_4 }'); SET ROLE :ROLE_1; SELECT * FROM detach_data_node('data_node_4', drop_remote_data => true); +WARNING: detaching data node is deprecated NOTICE: skipping hypertable "disttable_3" due to missing permissions detach_data_node ------------------ @@ -1447,6 +1523,7 @@ NOTICE: skipping hypertable "disttable_3" due to missing permissions -- Hypertables user had permissions for should be dropped on data nodes SELECT * FROM test.remote_exec('{ data_node_4 }', $$ SELECT hypertable_name, owner FROM timescaledb_information.hypertables; $$); +WARNING: executing remote command is deprecated NOTICE: [data_node_4]: SELECT hypertable_name, owner FROM timescaledb_information.hypertables NOTICE: [data_node_4]: hypertable_name|owner @@ -1463,6 +1540,7 @@ disttable_3 |super_user -- Cleanup SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT * FROM delete_data_node('data_node_1', force =>true); +WARNING: deleting data node is 
deprecated WARNING: insufficient number of data nodes for distributed hypertable "disttable" WARNING: insufficient number of data nodes for distributed hypertable "disttable_2" delete_data_node @@ -1471,12 +1549,14 @@ WARNING: insufficient number of data nodes for distributed hypertable "disttabl (1 row) SELECT * FROM delete_data_node('data_node_2', force =>true); +WARNING: deleting data node is deprecated delete_data_node ------------------ t (1 row) SELECT * FROM delete_data_node('data_node_3', force =>true); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -1487,20 +1567,24 @@ SET ROLE :ROLE_1; -- Cannot delete a data node which is attached to a table that we don't -- have owner permissions on SELECT * FROM delete_data_node('data_node_4', force =>true); +WARNING: deleting data node is deprecated ERROR: permission denied for hypertable "disttable_3" SELECT * FROM delete_data_node('data_node_5', force =>true); +WARNING: deleting data node is deprecated ERROR: permission denied for hypertable "disttable_3" \set ON_ERROR_STOP 1 SET ROLE :ROLE_CLUSTER_SUPERUSER; DROP TABLE disttable_3; -- Now we should be able to delete the data nodes SELECT * FROM delete_data_node('data_node_4', force =>true); +WARNING: deleting data node is deprecated delete_data_node ------------------ t (1 row) SELECT * FROM delete_data_node('data_node_5', force =>true); +WARNING: deleting data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "disttable_4" WARNING: insufficient number of data nodes for distributed hypertable "disttable_5" delete_data_node @@ -1552,6 +1636,7 @@ CREATE EXTENSION timescaledb VERSION '0.0.0'; \set ON_ERROR_STOP 0 SELECT * FROM add_data_node('data_node_1', 'localhost', database => :'DN_DBNAME_1', bootstrap => false); +WARNING: adding data node is deprecated ERROR: remote PostgreSQL instance has an incompatible timescaledb extension version -- Testing that it is not possible to use oneself as a data node. This -- is not allowed since it would create a cycle. @@ -1562,6 +1647,7 @@ ERROR: remote PostgreSQL instance has an incompatible timescaledb extension ver -- We cannot use default verbosity here for debugging since the -- version number is printed in some of the notices. SELECT * FROM add_data_node('data_node_99', host => 'localhost'); +WARNING: adding data node is deprecated NOTICE: database "db_data_node" already exists on data node, skipping NOTICE: extension "timescaledb" already exists on data node, skipping ERROR: [data_node_99]: cannot add the current database as a data node to itself @@ -1580,6 +1666,7 @@ SET ROLE :ROLE_3; -- foreign data wrapper will fail. \set ON_ERROR_STOP 0 SELECT * FROM add_data_node('data_node_6', host => 'localhost', database => :'DN_DBNAME_6'); +WARNING: adding data node is deprecated ERROR: permission denied for foreign-data wrapper timescaledb_fdw \set ON_ERROR_STOP 1 RESET ROLE; @@ -1589,10 +1676,12 @@ SET ROLE :ROLE_3; -- ROLE_3 doesn't have a password in the passfile and has not way to -- authenticate so adding a data node will still fail. 
SELECT * FROM add_data_node('data_node_6', host => 'localhost', database => :'DN_DBNAME_6'); +WARNING: adding data node is deprecated ERROR: could not connect to "data_node_6" \set ON_ERROR_STOP 1 -- Providing the password on the command line should work SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_6', host => 'localhost', database => :'DN_DBNAME_6', password => :'ROLE_3_PASS'); +WARNING: adding data node is deprecated NOTICE: database "db_data_node_6" already exists on data node, skipping NOTICE: extension "timescaledb" already exists on data node, skipping node_name | database | node_created | database_created | extension_created @@ -1601,6 +1690,7 @@ NOTICE: extension "timescaledb" already exists on data node, skipping (1 row) SELECT * FROM delete_data_node('data_node_6'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -1617,18 +1707,21 @@ DROP DATABASE :DN_DBNAME_6 WITH (FORCE); -- Test alter_data_node() ----------------------------------------------- SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_1 | db_data_node_1 | t | t | t (1 row) SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_2 | db_data_node_2 | t | t | t (1 row) SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_3', host => 'localhost', database => :'DN_DBNAME_3'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+----------------+--------------+------------------+------------------- data_node_3 | db_data_node_3 | t | t | t @@ -1642,6 +1735,7 @@ CREATE TABLE hyper2 (LIKE hyper1); CREATE TABLE hyper3 (LIKE hyper1); CREATE TABLE hyper_1dim (LIKE hyper1); SELECT create_distributed_hypertable('hyper1', 'time', 'location', replication_factor=>1); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -1649,6 +1743,7 @@ NOTICE: adding not-null constraint to column "time" (1 row) SELECT create_distributed_hypertable('hyper2', 'time', 'location', replication_factor=>2); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -1656,6 +1751,7 @@ NOTICE: adding not-null constraint to column "time" (1 row) SELECT create_distributed_hypertable('hyper3', 'time', 'location', replication_factor=>3); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -1663,6 +1759,7 @@ NOTICE: adding not-null constraint to column "time" (1 row) SELECT create_distributed_hypertable('hyper_1dim', 'time', chunk_time_interval=>interval '2 days', replication_factor=>3); 
+WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -1710,8 +1807,10 @@ SELECT * FROM chunk_query_data_node; \set ON_ERROR_STOP 0 -- must be owner to alter a data node SELECT * FROM alter_data_node('data_node_1', available=>false); +WARNING: altering data node is deprecated ERROR: must be owner of foreign server data_node_1 SELECT * FROM alter_data_node('data_node_1', port=>8989); +WARNING: altering data node is deprecated ERROR: must be owner of foreign server data_node_1 \set ON_ERROR_STOP 1 -- query some data from all hypertables to show its working before @@ -1758,6 +1857,7 @@ ERROR: could not connect to "data_node_1" \set ON_ERROR_STOP 1 -- alter the node as not available SELECT * FROM alter_data_node('data_node_1', available=>false); +WARNING: altering data node is deprecated WARNING: could not switch data node on 1 chunks node_name | host | port | database | available -------------+-----------+-------+----------------+----------- @@ -1909,6 +2009,7 @@ SELECT * FROM hyper_1dim WHERE time = '2022-01-03 00:00:05'; INSERT INTO _timescaledb_internal._dist_hyper_12_24_chunk VALUES ('2022-01-11 00:00:00', 1, 1); INSERT INTO _timescaledb_internal._dist_hyper_13_25_chunk VALUES ('2022-01-11 00:00:00', 1, 1); SELECT * FROM test.remote_exec(ARRAY['data_node_2', 'data_node_3'], $$ SELECT * FROM _timescaledb_internal._dist_hyper_12_24_chunk WHERE time = '2022-01-11 00:00:00'; $$); +WARNING: executing remote command is deprecated NOTICE: [data_node_2]: SELECT * FROM _timescaledb_internal._dist_hyper_12_24_chunk WHERE time = '2022-01-11 00:00:00' NOTICE: [data_node_2]: time |location|temp @@ -1931,6 +2032,7 @@ Tue Jan 11 00:00:00 2022 PST| 1| 1 (1 row) SELECT * FROM test.remote_exec(ARRAY['data_node_2', 'data_node_3'], $$ SELECT * FROM _timescaledb_internal._dist_hyper_13_25_chunk WHERE time = '2022-01-11 00:00:00'; $$); +WARNING: executing remote command is deprecated NOTICE: [data_node_2]: SELECT * FROM _timescaledb_internal._dist_hyper_13_25_chunk WHERE time = '2022-01-11 00:00:00' NOTICE: [data_node_2]: time |location|temp @@ -1986,6 +2088,7 @@ ERROR: some data nodes are not available for DDL commands -- Mark all DNs unavailable. 
Metadata should still retain last DN but all -- activity should fail SELECT * FROM alter_data_node('data_node_2', available=>false); +WARNING: altering data node is deprecated WARNING: could not switch data node on 2 chunks node_name | host | port | database | available -------------+-----------+-------+----------------+----------- @@ -1993,6 +2096,7 @@ WARNING: could not switch data node on 2 chunks (1 row) SELECT * FROM alter_data_node('data_node_3', available=>false); +WARNING: altering data node is deprecated WARNING: could not switch data node on 11 chunks node_name | host | port | database | available -------------+-----------+-------+----------------+----------- @@ -2027,6 +2131,7 @@ ERROR: some data nodes are not available for DDL commands ALTER DATABASE data_node_1_unavailable RENAME TO :DN_DBNAME_1; WARNING: you need to manually restart any running background workers after this command SELECT * FROM alter_data_node('data_node_1', available=>true); +WARNING: altering data node is deprecated WARNING: insufficient number of data nodes WARNING: insufficient number of data nodes node_name | host | port | database | available @@ -2035,6 +2140,7 @@ WARNING: insufficient number of data nodes (1 row) SELECT * FROM alter_data_node('data_node_2', available=>true); +WARNING: altering data node is deprecated WARNING: insufficient number of data nodes WARNING: insufficient number of data nodes WARNING: insufficient number of data nodes @@ -2045,6 +2151,7 @@ WARNING: insufficient number of data nodes (1 row) SELECT * FROM alter_data_node('data_node_3', available=>true); +WARNING: altering data node is deprecated node_name | host | port | database | available -------------+-----------+-------+----------------+----------- data_node_3 | localhost | 55432 | db_data_node_3 | t @@ -2116,6 +2223,7 @@ SELECT node_name, options FROM timescaledb_information.data_nodes order by node_ (3 rows) SELECT * FROM alter_data_node('data_node_1', available=>true, host=>'foo.bar', port=>8989, database=>'new_db'); +WARNING: altering data node is deprecated node_name | host | port | database | available -------------+---------+------+----------+----------- data_node_1 | foo.bar | 8989 | new_db | t @@ -2131,6 +2239,7 @@ SELECT node_name, options FROM timescaledb_information.data_nodes order by node_ -- just show current options: SELECT * FROM alter_data_node('data_node_1'); +WARNING: altering data node is deprecated node_name | host | port | database | available -------------+---------+------+----------+----------- data_node_1 | foo.bar | 8989 | new_db | t @@ -2143,17 +2252,22 @@ DROP TABLE hyper_1dim; \set ON_ERROR_STOP 0 -- test some error cases SELECT * FROM alter_data_node(NULL); +WARNING: altering data node is deprecated ERROR: data node name cannot be NULL SELECT * FROM alter_data_node('does_not_exist'); +WARNING: altering data node is deprecated ERROR: server "does_not_exist" does not exist SELECT * FROM alter_data_node('data_node_1', port=>89000); +WARNING: altering data node is deprecated ERROR: invalid port number 89000 -- cannot delete data node with "drop_database" since configuration is wrong SELECT delete_data_node('data_node_1', drop_database=>true); +WARNING: deleting data node is deprecated ERROR: could not connect to data node "data_node_1" \set ON_ERROR_STOP 1 -- restore configuration for data_node_1 SELECT * FROM alter_data_node('data_node_1', host=>'localhost', port=>:old_port, database=>:'DN_DBNAME_1'); +WARNING: altering data node is deprecated node_name | host | port | database | available 
-------------+-----------+-------+----------------+----------- data_node_1 | localhost | 55432 | db_data_node_1 | t @@ -2171,18 +2285,21 @@ DROP VIEW chunk_query_data_node; -- create new session to clear out connection cache \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; SELECT delete_data_node('data_node_1', drop_database=>true); +WARNING: deleting data node is deprecated delete_data_node ------------------ t (1 row) SELECT delete_data_node('data_node_2', drop_database=>true); +WARNING: deleting data node is deprecated delete_data_node ------------------ t (1 row) SELECT delete_data_node('data_node_3', drop_database=>true); +WARNING: deleting data node is deprecated delete_data_node ------------------ t diff --git a/tsl/test/expected/data_node_bootstrap.out b/tsl/test/expected/data_node_bootstrap.out index 3064dd87c0d..d03bdb91214 100644 --- a/tsl/test/expected/data_node_bootstrap.out +++ b/tsl/test/expected/data_node_bootstrap.out @@ -16,6 +16,7 @@ SELECT QUOTE_LITERAL(PG_ENCODING_TO_CHAR(encoding)) AS enc \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------+----------------+--------------+------------------+------------------- bootstrap_test | bootstrap_test | t | t | t @@ -44,6 +45,7 @@ SELECT PG_ENCODING_TO_CHAR(encoding) = :enc -- After delete_data_node, the database and extension should still -- exist on the data node SELECT * FROM delete_data_node('bootstrap_test'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -66,8 +68,10 @@ SELECT extname, extnamespace::regnamespace FROM pg_extension e WHERE extname = ' -- Trying to add the data node again should fail, with or without -- bootstrapping. 
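(The failures shown next appear to come from the cluster-membership marker that the deleted node's database still carries: the same file later deletes the dist_uuid metadata row and the re-add then succeeds. A minimal sketch of that recovery sequence, using the test's own 'bootstrap_test' names and hedged on that interpretation, is:

-- On the data node's database: remove the leftover membership marker
DELETE FROM _timescaledb_catalog.metadata WHERE key = 'dist_uuid';
-- Back on the access node: re-add without bootstrapping, since the
-- database and extension are still present on the data node
SELECT node_name, node_created, database_created, extension_created
  FROM add_data_node('bootstrap_test', host => 'localhost',
                      database => 'bootstrap_test', bootstrap => false);

The expected output below exercises exactly this order: first the rejected re-adds, then the dist_uuid cleanup, then a successful add reporting database_created = f and extension_created = f.)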
SELECT add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap=>false); +WARNING: adding data node is deprecated ERROR: cannot add "bootstrap_test" as a data node SELECT add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test'); +WARNING: adding data node is deprecated NOTICE: database "bootstrap_test" already exists on data node, skipping NOTICE: extension "timescaledb" already exists on data node, skipping ERROR: cannot add "bootstrap_test" as a data node @@ -79,6 +83,7 @@ DROP DATABASE bootstrap_test WITH (FORCE); \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => true); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------+----------------+--------------+------------------+------------------- bootstrap_test | bootstrap_test | t | t | t @@ -91,6 +96,7 @@ SELECT * FROM show_data_nodes(); (1 row) SELECT * FROM delete_data_node('bootstrap_test'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -111,6 +117,7 @@ DELETE FROM _timescaledb_catalog.metadata WHERE key = 'dist_uuid'; \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => false); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------+----------------+--------------+------------------+------------------- bootstrap_test | bootstrap_test | t | f | f @@ -121,11 +128,13 @@ FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap -- transaction block since it is non-transactional. 
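(The hunk that follows shows the error raised when delete_data_node() is wrapped in an explicit transaction. As a complementary sketch, with names taken from the test and using the stock PostgreSQL client_min_messages setting rather than anything introduced by this patch: run the call in autocommit mode, and, if the new deprecation WARNINGs are unwanted noise in an interactive session, raise the client message threshold around it.

SET client_min_messages TO ERROR;           -- also keeps the deprecation WARNING out of the client output
SELECT delete_data_node('bootstrap_test');  -- must run outside BEGIN ... COMMIT, as the test below demonstrates
RESET client_min_messages;

Each statement runs in its own implicit transaction here, which is what the non-transactional data node API requires.)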
BEGIN; SELECT * FROM delete_data_node('bootstrap_test', drop_database => true); +WARNING: deleting data node is deprecated ERROR: delete_data_node() cannot run inside a transaction block ROLLBACK; \set ON_ERROR_STOP 1 CREATE TABLE conditions (time timestamptz, device int, temp float); SELECT create_distributed_hypertable('conditions', 'time', 'device'); +WARNING: distributed hypertable is deprecated WARNING: only one data node was assigned to the hypertable NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -136,17 +145,20 @@ NOTICE: adding not-null constraint to column "time" \set ON_ERROR_STOP 0 -- Should fail because the data node is the last one SELECT * FROM delete_data_node('bootstrap_test', drop_database => true); +WARNING: deleting data node is deprecated ERROR: insufficient number of data nodes for distributed hypertable "conditions" \set ON_ERROR_STOP 1 -- Add another data node SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test_2', host => 'localhost', database => 'bootstrap_test_2', bootstrap => true); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ------------------+------------------+--------------+------------------+------------------- bootstrap_test_2 | bootstrap_test_2 | t | t | t (1 row) SELECT attach_data_node('bootstrap_test_2', 'conditions'); +WARNING: attaching data node is deprecated NOTICE: the number of partitions in dimension "device" was increased to 2 attach_data_node ------------------------ @@ -158,6 +170,7 @@ INSERT INTO conditions VALUES ('2021-12-01 10:30', 2, 20.3); \set ON_ERROR_STOP 0 -- Should fail because the data node still holds data SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true); +WARNING: deleting data node is deprecated ERROR: insufficient number of data nodes \set ON_ERROR_STOP 1 -- Data node's database still exists after failure to delete @@ -175,6 +188,7 @@ SELECT drop_chunks('conditions', older_than => '2022-01-01'::timestamptz); (1 row) SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true); +WARNING: deleting data node is deprecated NOTICE: the number of partitions in dimension "device" of hypertable "conditions" was decreased to 1 delete_data_node ------------------ @@ -205,6 +219,7 @@ DROP TABLE conditions; -- Now drop the data node and it should clear the connection from the -- cache first SELECT * FROM delete_data_node('bootstrap_test', drop_database => true); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -218,6 +233,7 @@ ERROR: database "bootstrap_test" does not exist -- Adding the data node again should work SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => true); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------+----------------+--------------+------------------+------------------- bootstrap_test | bootstrap_test | t | t | t @@ -228,11 +244,13 @@ DROP DATABASE bootstrap_test WITH (FORCE); \set ON_ERROR_STOP 0 -- Expect an error since the database does not exist. 
SELECT * FROM delete_data_node('bootstrap_test', drop_database => true); +WARNING: deleting data node is deprecated ERROR: [bootstrap_test]: database "bootstrap_test" does not exist \set ON_ERROR_STOP 1 -- Delete it without the drop_database option set since the database -- was manually deleted. SELECT * FROM delete_data_node('bootstrap_test'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -250,12 +268,14 @@ SET client_min_messages TO NOTICE; \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => false); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------+----------------+--------------+------------------+------------------- bootstrap_test | bootstrap_test | t | f | f (1 row) SELECT * FROM delete_data_node('bootstrap_test'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -275,6 +295,7 @@ SET client_min_messages TO NOTICE; \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => true); +WARNING: adding data node is deprecated NOTICE: database "bootstrap_test" already exists on data node, skipping NOTICE: extension "timescaledb" already exists on data node, skipping node_name | database | node_created | database_created | extension_created @@ -283,6 +304,7 @@ NOTICE: extension "timescaledb" already exists on data node, skipping (1 row) SELECT * FROM delete_data_node('bootstrap_test'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -302,6 +324,7 @@ CREATE DATABASE bootstrap_test \set ON_ERROR_STOP 0 SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => true); +WARNING: adding data node is deprecated ERROR: database exists but has wrong encoding \set ON_ERROR_STOP 1 DROP DATABASE bootstrap_test WITH (FORCE); @@ -336,6 +359,7 @@ SET client_min_messages TO NOTICE; \set ON_ERROR_STOP 0 SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => false); +WARNING: adding data node is deprecated ERROR: database exists but has wrong encoding \set ON_ERROR_STOP 1 DROP DATABASE bootstrap_test WITH (FORCE); @@ -353,6 +377,7 @@ SET client_min_messages TO NOTICE; \set ON_ERROR_STOP 0 SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => false); +WARNING: adding data node is deprecated ERROR: database exists but has wrong collation \set ON_ERROR_STOP 1 DROP DATABASE bootstrap_test WITH (FORCE); @@ -370,6 +395,7 @@ SET client_min_messages TO NOTICE; \set ON_ERROR_STOP 0 SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => false); +WARNING: adding data node is deprecated ERROR: database exists but has wrong LC_CTYPE \set ON_ERROR_STOP 1 DROP DATABASE bootstrap_test WITH (FORCE); @@ -379,6 +405,7 @@ DROP DATABASE 
bootstrap_test WITH (FORCE); \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => true); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------+----------------+--------------+------------------+------------------- bootstrap_test | bootstrap_test | t | t | t @@ -391,6 +418,7 @@ SELECT * FROM show_data_nodes(); (1 row) SELECT * FROM delete_data_node('bootstrap_test'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -400,6 +428,7 @@ DROP DATABASE bootstrap_test WITH (FORCE); \set ON_ERROR_STOP 0 SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => false); +WARNING: adding data node is deprecated ERROR: could not connect to "bootstrap_test" \set ON_ERROR_STOP 1 ----------------------------------------------------------------------- @@ -410,12 +439,14 @@ ERROR: could not connect to "bootstrap_test" \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => true); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------+----------------+--------------+------------------+------------------- bootstrap_test | bootstrap_test | t | t | t (1 row) SELECT * FROM delete_data_node('bootstrap_test'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -438,6 +469,7 @@ SELECT extname FROM pg_extension WHERE extname = 'timescaledb'; \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => false); +WARNING: adding data node is deprecated ERROR: cannot add "bootstrap_test" as a data node \set ON_ERROR_STOP 1 DROP DATABASE bootstrap_test WITH (FORCE); @@ -474,9 +506,11 @@ SELECT node_name, database, node_created, database_created, extension_created FROM ts_non_default.add_data_node( 'bootstrap_test', host => 'localhost', database => 'bootstrap_test', bootstrap => true); +WARNING: adding data node is deprecated NOTICE: database "bootstrap_test" already exists on data node, skipping ERROR: schema "ts_non_default" already exists in database, aborting SELECT * FROM ts_non_default.delete_data_node('bootstrap_test'); +WARNING: deleting data node is deprecated ERROR: server "bootstrap_test" does not exist \set ON_ERROR_STOP 1 \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER @@ -489,6 +523,7 @@ BEGIN; \set ON_ERROR_STOP 0 SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('bootstrap_test', host => 'localhost', database => 'bootstrap_test'); +WARNING: adding data node is deprecated ERROR: add_data_node() cannot run inside a transaction block \set ON_ERROR_STOP 1 COMMIT; @@ -509,12 +544,16 @@ END; $$ LANGUAGE plpgsql; \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; SELECT test_database_name('Unusual Name'); +WARNING: adding data node is deprecated +WARNING: deleting data node is deprecated test_database_name -------------------- (1 row) SELECT 
test_database_name(U&'\0441\043B\043E\043D'); +WARNING: adding data node is deprecated +WARNING: deleting data node is deprecated test_database_name -------------------- @@ -544,6 +583,7 @@ SELECT label FROM pg_shseclabel SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('drop_db_test_dn', host => 'localhost', database => 'drop_db_test_dn'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------+-----------------+--------------+------------------+------------------- drop_db_test_dn | drop_db_test_dn | t | t | t diff --git a/tsl/test/expected/debug_notice.out b/tsl/test/expected/debug_notice.out index d0f2e755b29..b40269bb4ba 100644 --- a/tsl/test/expected/debug_notice.out +++ b/tsl/test/expected/debug_notice.out @@ -18,6 +18,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------------+-------------------+--------------+------------------+------------------- db_debug_notice_1 | db_debug_notice_1 | t | t | t @@ -28,6 +31,7 @@ FROM ( GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; CREATE TABLE hyper (time timestamptz, device int, location int, temp float); SELECT * FROM create_distributed_hypertable('hyper', 'time', 'device'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- diff --git a/tsl/test/expected/dist_api_calls.out b/tsl/test/expected/dist_api_calls.out index 3aa132d15e2..c779874724c 100644 --- a/tsl/test/expected/dist_api_calls.out +++ b/tsl/test/expected/dist_api_calls.out @@ -16,6 +16,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ---------------------+---------------------+--------------+------------------+------------------- db_dist_api_calls_1 | db_dist_api_calls_1 | t | t | t @@ -34,6 +37,7 @@ CREATE TABLE disttable( value float ); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 1 | public | disttable | t @@ -62,6 +66,7 @@ SELECT * FROM disttable ORDER BY time; (8 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT show_chunks('disttable'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_api_calls_1]: SELECT show_chunks('disttable') NOTICE: [db_dist_api_calls_1]: show_chunks @@ -114,6 +119,7 @@ SELECT * FROM disttable ORDER BY time; (4 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT show_chunks('disttable'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_api_calls_1]: SELECT show_chunks('disttable') NOTICE: [db_dist_api_calls_1]: show_chunks @@ -174,6 +180,7 @@ SELECT * FROM show_chunks('disttable'); (6 rows) SELECT * 
FROM test.remote_exec(NULL, $$ SELECT show_chunks('disttable'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_api_calls_1]: SELECT show_chunks('disttable') NOTICE: [db_dist_api_calls_1]: show_chunks @@ -249,6 +256,7 @@ SELECT approximate_row_count('disttable'); (1 row) SELECT * FROM test.remote_exec(NULL, $$ SELECT approximate_row_count('disttable'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_api_calls_1]: SELECT approximate_row_count('disttable') NOTICE: [db_dist_api_calls_1]: approximate_row_count @@ -286,6 +294,7 @@ CREATE TABLE disttable_repl( value float ); SELECT * FROM create_distributed_hypertable('disttable_repl', 'time', 'device', 3, replication_factor => 2); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+----------------+--------- 2 | public | disttable_repl | t @@ -325,6 +334,7 @@ SELECT * FROM show_chunks('disttable_repl'); (6 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT show_chunks('disttable_repl'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_api_calls_1]: SELECT show_chunks('disttable_repl') NOTICE: [db_dist_api_calls_1]: show_chunks diff --git a/tsl/test/expected/dist_backup.out b/tsl/test/expected/dist_backup.out index 76ce5f12e7b..00f5d79618a 100644 --- a/tsl/test/expected/dist_backup.out +++ b/tsl/test/expected/dist_backup.out @@ -14,6 +14,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ------------------+------------------+--------------+------------------+------------------- db_dist_backup_1 | db_dist_backup_1 | t | t | t @@ -33,12 +36,14 @@ SELECT create_distributed_restore_point(NULL); -- test long restore point name (>= 64 chars) \set ON_ERROR_STOP 0 SELECT create_distributed_restore_point('0123456789012345678901234567890123456789012345678901234567890123456789'); +WARNING: creating distributed restore point is deprecated ERROR: restore point name is too long \set ON_ERROR_STOP 1 -- test super user check \set ON_ERROR_STOP 0 SET ROLE :ROLE_1; SELECT create_distributed_restore_point('test'); +WARNING: creating distributed restore point is deprecated ERROR: must be superuser to create restore point RESET ROLE; \set ON_ERROR_STOP 1 @@ -46,6 +51,7 @@ RESET ROLE; SET timescaledb.enable_2pc = false; \set ON_ERROR_STOP 0 SELECT create_distributed_restore_point('test'); +WARNING: creating distributed restore point is deprecated ERROR: two-phase commit transactions are not enabled \set ON_ERROR_STOP 1 SET timescaledb.enable_2pc = true; @@ -84,6 +90,7 @@ DROP DATABASE dist_rp_test WITH (FORCE); \c :DATA_NODE_1 :ROLE_CLUSTER_SUPERUSER; \set ON_ERROR_STOP 0 SELECT create_distributed_restore_point('test'); +WARNING: creating distributed restore point is deprecated ERROR: distributed restore point must be created on the access node \set ON_ERROR_STOP 1 \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; @@ -96,6 +103,11 @@ ERROR: [db_dist_backup_x]: distributed restore point must be created on the acc \set ON_ERROR_STOP 1 -- test on access node SELECT node_name, node_type, pg_lsn(restore_point) > pg_lsn('0/0') as valid_lsn FROM create_distributed_restore_point('dist_rp') ORDER BY node_name; +WARNING: creating 
distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated node_name | node_type | valid_lsn ------------------+-------------+----------- db_dist_backup_1 | data_node | t @@ -106,6 +118,11 @@ SELECT node_name, node_type, pg_lsn(restore_point) > pg_lsn('0/0') as valid_lsn -- restore point can be have duplicates SELECT node_name, node_type, pg_lsn(restore_point) > pg_lsn('0/0') as valid_lsn FROM create_distributed_restore_point('dist_rp') ORDER BY node_name; +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated node_name | node_type | valid_lsn ------------------+-------------+----------- db_dist_backup_1 | data_node | t @@ -116,7 +133,17 @@ SELECT node_name, node_type, pg_lsn(restore_point) > pg_lsn('0/0') as valid_lsn -- make sure each new restore point have lsn greater then previous one (access node lsn) SELECT restore_point as lsn_1 FROM create_distributed_restore_point('dist_rp_1') WHERE node_type = 'access_node' \gset +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated SELECT restore_point as lsn_2 FROM create_distributed_restore_point('dist_rp_2') WHERE node_type = 'access_node' \gset +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated SELECT pg_lsn(:'lsn_2') > pg_lsn(:'lsn_1') as valid_lsn; valid_lsn ----------- @@ -133,6 +160,7 @@ SELECT pg_lsn(:'lsn_3') > pg_lsn(:'lsn_2') as valid_lsn; -- test create_distributed_restore_point() when one of the nodes if unavailable SELECT alter_data_node(:'DATA_NODE_1', available => false); +WARNING: altering data node is deprecated alter_data_node ------------------------------------------------------- (db_dist_backup_1,localhost,55432,db_dist_backup_1,f) @@ -140,15 +168,22 @@ SELECT alter_data_node(:'DATA_NODE_1', available => false); \set ON_ERROR_STOP 0 SELECT create_distributed_restore_point('test'); +WARNING: creating distributed restore point is deprecated ERROR: some data nodes are not available \set ON_ERROR_STOP 1 SELECT alter_data_node(:'DATA_NODE_1', available => true); +WARNING: altering data node is deprecated alter_data_node ------------------------------------------------------- (db_dist_backup_1,localhost,55432,db_dist_backup_1,t) (1 row) SELECT node_name, node_type, pg_lsn(restore_point) > pg_lsn('0/0') as valid_lsn FROM create_distributed_restore_point('test') ORDER BY node_name; +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated +WARNING: creating distributed restore point is deprecated node_name | node_type | valid_lsn 
------------------+-------------+----------- db_dist_backup_1 | data_node | t diff --git a/tsl/test/expected/dist_cagg.out b/tsl/test/expected/dist_cagg.out index 8c06b94f58f..ffba146d342 100644 --- a/tsl/test/expected/dist_cagg.out +++ b/tsl/test/expected/dist_cagg.out @@ -16,6 +16,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------+----------------+--------------+------------------+------------------- db_dist_cagg_1 | db_dist_cagg_1 | t | t | t @@ -30,6 +33,7 @@ GRANT CREATE ON SCHEMA public TO :ROLE_1; -- create cagg on distributed hypertable CREATE TABLE conditions_dist(day timestamptz NOT NULL, temperature INT NOT NULL); SELECT create_distributed_hypertable('conditions_dist', 'day', chunk_time_interval => INTERVAL '1 day', replication_factor => 2); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (1,public,conditions_dist,t) @@ -56,12 +60,14 @@ SELECT * FROM conditions_dist_1m ORDER BY bucket; -- case 1: ensure select works when data node is unavailable SELECT alter_data_node(:'DATA_NODE_1'); +WARNING: altering data node is deprecated alter_data_node --------------------------------------------------- (db_dist_cagg_1,localhost,55432,db_dist_cagg_1,t) (1 row) SELECT alter_data_node(:'DATA_NODE_1', port => 55433, available => false); +WARNING: altering data node is deprecated alter_data_node --------------------------------------------------- (db_dist_cagg_1,localhost,55433,db_dist_cagg_1,f) @@ -109,12 +115,14 @@ ERROR: could not connect to "db_dist_cagg_1" \set ON_ERROR_STOP 1 -- case 7: ensure data node update works when it becomes available again SELECT alter_data_node(:'DATA_NODE_1', port => 55432); +WARNING: altering data node is deprecated alter_data_node --------------------------------------------------- (db_dist_cagg_1,localhost,55432,db_dist_cagg_1,f) (1 row) SELECT alter_data_node(:'DATA_NODE_1', available => true); +WARNING: altering data node is deprecated alter_data_node --------------------------------------------------- (db_dist_cagg_1,localhost,55432,db_dist_cagg_1,t) diff --git a/tsl/test/expected/dist_commands.out b/tsl/test/expected/dist_commands.out index 31f2c529514..5708895fdc3 100644 --- a/tsl/test/expected/dist_commands.out +++ b/tsl/test/expected/dist_commands.out @@ -14,6 +14,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created --------------------+--------------------+--------------+------------------+------------------- db_dist_commands_1 | db_dist_commands_1 | t | t | t @@ -170,12 +173,14 @@ SELECT is_access_node_session_on_data_node(); -- Ensure peer dist id is already set and can be set only once \set ON_ERROR_STOP 0 SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ SELECT * FROM _timescaledb_functions.set_peer_dist_id('77348176-09da-4a80-bc78-e31bdf5e63ec'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_commands_1]: SELECT * FROM 
_timescaledb_functions.set_peer_dist_id('77348176-09da-4a80-bc78-e31bdf5e63ec') ERROR: [db_dist_commands_1]: distributed peer ID already set \set ON_ERROR_STOP 1 -- Repeat is_access_node_session_on_data_node() test again, but this time using connections openned from -- access node to data nodes. Must return true. SELECT * FROM test.remote_exec(NULL, $$ SELECT is_access_node_session_on_data_node(); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_commands_1]: SELECT is_access_node_session_on_data_node() NOTICE: [db_dist_commands_1]: is_access_node_session_on_data_node @@ -247,6 +252,7 @@ CALL distributed_exec('CREATE TABLE dist_test (id int)'); CALL distributed_exec('INSERT INTO dist_test values (7)'); -- Test INSERTING data using empty array of data nodes (same behavior as not specifying). SELECT * FROM test.remote_exec(NULL, $$ SELECT * from dist_test; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_commands_1]: SELECT * from dist_test NOTICE: [db_dist_commands_1]: id @@ -291,6 +297,7 @@ $$); ERROR: [db_dist_commands_x]: role "dist_test_role" already exists \set ON_ERROR_STOP 1 SELECT * FROM test.remote_exec(NULL, $$ SELECT 1 from pg_catalog.pg_roles WHERE rolname = 'dist_test_role'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_commands_1]: SELECT 1 from pg_catalog.pg_roles WHERE rolname = 'dist_test_role' NOTICE: [db_dist_commands_1]: ?column? @@ -345,18 +352,21 @@ SELECT * FROM _timescaledb_functions.health() ORDER BY 1 NULLS FIRST; (4 rows) SELECT * FROM delete_data_node(:'DATA_NODE_1'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t (1 row) SELECT * FROM delete_data_node(:'DATA_NODE_2'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t (1 row) SELECT * FROM delete_data_node(:'DATA_NODE_3'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -385,6 +395,7 @@ CREATE SERVER myserver FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host 'foo', dbname 'foodb', port '5432'); \set ON_ERROR_STOP 0 SELECT * FROM test.remote_exec('{myserver}', $$ SELECT 1; $$); +WARNING: executing remote command is deprecated ERROR: data node "myserver" is not a TimescaleDB server \set ON_ERROR_STOP 1 DROP SERVER myserver; @@ -401,12 +412,14 @@ DROP EXTENSION postgres_fdw; -- other databases above that prevents this. 
\c :TEST_DBNAME :ROLE_SUPERUSER SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('dist_commands_1', host => 'localhost', database => :'DATA_NODE_1'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------+--------------------+--------------+------------------+------------------- dist_commands_1 | db_dist_commands_1 | t | t | t (1 row) SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('dist_commands_2', host => 'localhost', database => :'DATA_NODE_2'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------+--------------------+--------------+------------------+------------------- dist_commands_2 | db_dist_commands_2 | t | t | t @@ -444,6 +457,7 @@ COMMIT; \set ON_ERROR_STOP 1 -- No changes should be there SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM my_table; $$); +WARNING: executing remote command is deprecated NOTICE: [dist_commands_1]: SELECT * FROM my_table NOTICE: [dist_commands_1]: key|value @@ -470,6 +484,7 @@ CALL distributed_exec($$ INSERT INTO my_table VALUES (2, 'bar'); $$); COMMIT; -- We should see changes SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM my_table; $$); +WARNING: executing remote command is deprecated NOTICE: [dist_commands_1]: SELECT * FROM my_table NOTICE: [dist_commands_1]: key|value @@ -506,6 +521,7 @@ COMMIT; \set ON_ERROR_STOP 1 -- We should see no changes SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM my_table; $$); +WARNING: executing remote command is deprecated NOTICE: [dist_commands_1]: SELECT * FROM my_table NOTICE: [dist_commands_1]: key|value diff --git a/tsl/test/expected/dist_compression.out b/tsl/test/expected/dist_compression.out index a6172b0cd97..a6430108ef5 100644 --- a/tsl/test/expected/dist_compression.out +++ b/tsl/test/expected/dist_compression.out @@ -33,6 +33,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------------+-----------------------+--------------+------------------+------------------- db_dist_compression_1 | db_dist_compression_1 | t | t | t @@ -47,6 +50,7 @@ SET ROLE :ROLE_1; CREATE TABLE compressed(time timestamptz, device int, temp float); -- Replicate twice to see that compress_chunk compresses all replica chunks SELECT create_distributed_hypertable('compressed', 'time', 'device', replication_factor => 2); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -95,6 +99,7 @@ SELECT table_name, compressed_hypertable_id FROM _timescaledb_catalog.hypertable WHERE table_name = 'compressed'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_compression_1]: SELECT table_name, compressed_hypertable_id FROM _timescaledb_catalog.hypertable @@ -290,6 +295,7 @@ ORDER BY chunk_name, node_name; SELECT test.remote_exec(NULL, $$ SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk ORDER BY id; $$); +WARNING: executing remote command is deprecated 
NOTICE: [db_dist_compression_1]: SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk ORDER BY id NOTICE: [db_dist_compression_1]: @@ -591,6 +597,7 @@ SELECT * FROM test.remote_exec( NULL, WHERE attname = 'device_id' OR attname = 'new_intcol' and hypertable_id = (SELECT id from _timescaledb_catalog.hypertable WHERE table_name = 'compressed' ) ORDER BY attname; $$ ); +WARNING: executing remote command is deprecated NOTICE: [db_dist_compression_1]: SELECT * FROM _timescaledb_catalog.hypertable_compression WHERE attname = 'device_id' OR attname = 'new_intcol' and hypertable_id = (SELECT id from _timescaledb_catalog.hypertable @@ -654,6 +661,7 @@ CREATE TABLE conditions ( humidity DOUBLE PRECISION NULL ); SELECT create_distributed_hypertable('conditions', 'time', chunk_time_interval => '31days'::interval, replication_factor => 2); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (2,public,conditions,t) @@ -767,6 +775,7 @@ DROP TABLE IF EXISTS conditions CASCADE; --smallint tests CREATE TABLE test_table_smallint(time smallint, val int); SELECT create_distributed_hypertable('test_table_smallint', 'time', chunk_time_interval => 1, replication_factor => 2); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ---------------------------------- @@ -809,6 +818,7 @@ ORDER BY chunk_name; --integer tests CREATE TABLE test_table_integer(time int, val int); SELECT create_distributed_hypertable('test_table_integer', 'time', chunk_time_interval => 1, replication_factor => 2); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable --------------------------------- @@ -851,6 +861,7 @@ ORDER BY chunk_name; --bigint tests CREATE TABLE test_table_bigint(time bigint, val int); SELECT create_distributed_hypertable('test_table_bigint', 'time', chunk_time_interval => 1, replication_factor => 2); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable -------------------------------- @@ -893,6 +904,7 @@ ORDER BY chunk_name; --TEST8 insert into compressed chunks on dist. 
hypertable CREATE TABLE test_recomp_int(time bigint, val int); SELECT create_distributed_hypertable('test_recomp_int', 'time', chunk_time_interval => 20); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -1271,6 +1283,7 @@ NOTICE: drop cascades to table _timescaledb_internal._hyper_7_27_chunk -- test compression default handling CREATE TABLE test_defaults(time timestamptz NOT NULL, device_id int); SELECT create_distributed_hypertable('test_defaults','time'); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (8,public,test_defaults,t) @@ -1325,6 +1338,7 @@ SELECT * FROM test_defaults ORDER BY 1,2; -- test dropping columns from compressed CREATE TABLE test_drop(f1 text, f2 text, f3 text, time timestamptz, device int, o1 text, o2 text); SELECT create_distributed_hypertable('test_drop','time'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -1409,6 +1423,7 @@ ORDER BY 1; -- test ADD COLUMN IF NOT EXISTS on a distributed hypertable CREATE TABLE metric (time TIMESTAMPTZ NOT NULL, val FLOAT8 NOT NULL, dev_id INT4 NOT NULL); SELECT create_distributed_hypertable('metric', 'time'); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (10,public,metric,t) @@ -1506,6 +1521,7 @@ SELECT * FROM metric where medium is not null ORDER BY time LIMIT 1; -- create compressed distributed hypertable CREATE TABLE compressed(time timestamptz NOT NULL, device int, temp float); SELECT create_distributed_hypertable('compressed', 'time', 'device', replication_factor => 3); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (11,public,compressed,t) @@ -1526,6 +1542,7 @@ ORDER BY chunk; (3 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('compressed'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_compression_1]: SELECT * from show_chunks('compressed') NOTICE: [db_dist_compression_1]: show_chunks @@ -1580,6 +1597,7 @@ SELECT count(*) FROM compressed; -- make data node unavailable \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER SELECT alter_data_node(:'DATA_NODE_1', port => 55433, available => false); +WARNING: altering data node is deprecated WARNING: could not switch data node on 5 chunks alter_data_node ----------------------------------------------------------------- @@ -1611,6 +1629,7 @@ SELECT * from show_chunks('compressed'); (3 rows) SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * from show_chunks('compressed'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_compression_2]: SELECT * from show_chunks('compressed') NOTICE: [db_dist_compression_2]: show_chunks @@ -1645,12 +1664,14 @@ SELECT count(*) FROM compressed; -- make data node available again \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER SELECT alter_data_node(:'DATA_NODE_1', port => 55432); +WARNING: altering data node is deprecated alter_data_node ----------------------------------------------------------------- (db_dist_compression_1,localhost,55432,db_dist_compression_1,f) (1 row) SELECT alter_data_node(:'DATA_NODE_1', available => true); +WARNING: altering data node is deprecated alter_data_node ----------------------------------------------------------------- 
(db_dist_compression_1,localhost,55432,db_dist_compression_1,t) @@ -1667,6 +1688,7 @@ SELECT * from show_chunks('compressed'); (3 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('compressed'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_compression_2]: SELECT * from show_chunks('compressed') NOTICE: [db_dist_compression_2]: show_chunks @@ -1722,6 +1744,7 @@ SELECT * from show_chunks('compressed'); (3 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('compressed'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_compression_2]: SELECT * from show_chunks('compressed') NOTICE: [db_dist_compression_2]: show_chunks diff --git a/tsl/test/expected/dist_copy_available_dns.out b/tsl/test/expected/dist_copy_available_dns.out index 8fbe235f6eb..07e2b1c416d 100644 --- a/tsl/test/expected/dist_copy_available_dns.out +++ b/tsl/test/expected/dist_copy_available_dns.out @@ -8,6 +8,7 @@ \set DN_DBNAME_3 :TEST_DBNAME _3 SELECT 1 FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); +WARNING: adding data node is deprecated ?column? ---------- 1 @@ -15,6 +16,7 @@ SELECT 1 FROM add_data_node('data_node_1', host => 'localhost', SELECT 1 FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); +WARNING: adding data node is deprecated ?column? ---------- 1 @@ -22,6 +24,7 @@ SELECT 1 FROM add_data_node('data_node_2', host => 'localhost', SELECT 1 FROM add_data_node('data_node_3', host => 'localhost', database => :'DN_DBNAME_3'); +WARNING: adding data node is deprecated ?column? ---------- 1 @@ -40,6 +43,7 @@ SELECT ch.hypertable_name, format('%I.%I', ch.chunk_schema, ch.chunk_name)::regc create table uk_price_paid(price integer, "date" date, postcode1 text, postcode2 text, type smallint, is_new bool, duration smallint, addr1 text, addr2 text, street text, locality text, town text, district text, country text, category smallint); -- Aim to about 100 partitions, the data is from 1995 to 2022. 
select create_distributed_hypertable('uk_price_paid', 'date', chunk_time_interval => interval '270 day', replication_factor=>3); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "date" create_distributed_hypertable ------------------------------- @@ -48,6 +52,7 @@ NOTICE: adding not-null constraint to column "date" create table uk_price_paid_space2(like uk_price_paid); select create_distributed_hypertable('uk_price_paid_space2', 'date', 'postcode2', 2, chunk_time_interval => interval '270 day', replication_factor => 2); +WARNING: distributed hypertable is deprecated WARNING: insufficient number of partitions for dimension "postcode2" create_distributed_hypertable ----------------------------------- @@ -85,6 +90,7 @@ SELECT * FROM chunk_query_data_node WHERE hypertable_name = 'uk_price_paid' LIMI SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT * FROM alter_data_node('data_node_1', available=>false); +WARNING: altering data node is deprecated node_name | host | port | database | available -------------+-----------+-------+------------------------------+----------- data_node_1 | localhost | 55432 | db_dist_copy_available_dns_1 | f @@ -114,6 +120,7 @@ set timescaledb.max_open_chunks_per_insert = 1117; WARNING: insert cache size is larger than hypertable chunk cache size SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT * FROM alter_data_node('data_node_1', available=>true); +WARNING: altering data node is deprecated node_name | host | port | database | available -------------+-----------+-------+------------------------------+----------- data_node_1 | localhost | 55432 | db_dist_copy_available_dns_1 | t @@ -182,6 +189,7 @@ set timescaledb.dist_copy_transfer_format = 'binary'; create table uk_price_paid_bin(like uk_price_paid); select create_distributed_hypertable('uk_price_paid_bin', 'date', 'postcode2', chunk_time_interval => interval '90 day', replication_factor => 3); +WARNING: distributed hypertable is deprecated create_distributed_hypertable -------------------------------- (3,public,uk_price_paid_bin,t) @@ -206,6 +214,7 @@ SELECT * FROM chunk_query_data_node WHERE hypertable_name = 'uk_price_paid_bin' SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT * FROM alter_data_node('data_node_1', available=>false); +WARNING: altering data node is deprecated node_name | host | port | database | available -------------+-----------+-------+------------------------------+----------- data_node_1 | localhost | 55432 | db_dist_copy_available_dns_1 | f @@ -251,6 +260,7 @@ SELECT * FROM chunk_query_data_node WHERE hypertable_name = 'uk_price_paid_bin' SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT * FROM alter_data_node('data_node_1', available=>true); +WARNING: altering data node is deprecated node_name | host | port | database | available -------------+-----------+-------+------------------------------+----------- data_node_1 | localhost | 55432 | db_dist_copy_available_dns_1 | t @@ -278,6 +288,7 @@ SELECT * FROM chunk_query_data_node WHERE hypertable_name = 'uk_price_paid' LIMI SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT * FROM alter_data_node('data_node_2', available=>false); +WARNING: altering data node is deprecated node_name | host | port | database | available -------------+-----------+-------+------------------------------+----------- data_node_2 | localhost | 55432 | db_dist_copy_available_dns_2 | f @@ -303,6 +314,7 @@ SELECT * FROM chunk_query_data_node WHERE hypertable_name = 'uk_price_paid' LIMI SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT * FROM alter_data_node('data_node_2', 
available=>true); +WARNING: altering data node is deprecated node_name | host | port | database | available -------------+-----------+-------+------------------------------+----------- data_node_2 | localhost | 55432 | db_dist_copy_available_dns_2 | t @@ -314,6 +326,7 @@ SET timescaledb.enable_distributed_insert_with_copy=true; INSERT INTO uk_price_paid SELECT * FROM uk_price_paid_bin; SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT * FROM alter_data_node('data_node_3', available=>false); +WARNING: altering data node is deprecated node_name | host | port | database | available -------------+-----------+-------+------------------------------+----------- data_node_3 | localhost | 55432 | db_dist_copy_available_dns_3 | f diff --git a/tsl/test/expected/dist_copy_format_long.out b/tsl/test/expected/dist_copy_format_long.out index 0a0fd3e8725..b462a110556 100644 --- a/tsl/test/expected/dist_copy_format_long.out +++ b/tsl/test/expected/dist_copy_format_long.out @@ -9,6 +9,7 @@ \set DN_DBNAME_3 :TEST_DBNAME _3 SELECT 1 FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); +WARNING: adding data node is deprecated ?column? ---------- 1 @@ -16,6 +17,7 @@ SELECT 1 FROM add_data_node('data_node_1', host => 'localhost', SELECT 1 FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); +WARNING: adding data node is deprecated ?column? ---------- 1 @@ -23,6 +25,7 @@ SELECT 1 FROM add_data_node('data_node_2', host => 'localhost', SELECT 1 FROM add_data_node('data_node_3', host => 'localhost', database => :'DN_DBNAME_3'); +WARNING: adding data node is deprecated ?column? ---------- 1 @@ -38,6 +41,7 @@ SET ROLE :ROLE_1; create table uk_price_paid(price integer, "date" date, postcode1 text, postcode2 text, type smallint, is_new bool, duration smallint, addr1 text, addr2 text, street text, locality text, town text, district text, country text, category smallint); select create_distributed_hypertable('uk_price_paid', 'date', 'postcode2', chunk_time_interval => interval '270 day'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "date" create_distributed_hypertable ------------------------------- @@ -66,6 +70,7 @@ set timescaledb.dist_copy_transfer_format = 'binary'; create table uk_price_paid_bin(like uk_price_paid); select create_distributed_hypertable('uk_price_paid_bin', 'date', 'postcode2', chunk_time_interval => interval '90 day', replication_factor => 2); +WARNING: distributed hypertable is deprecated create_distributed_hypertable -------------------------------- (2,public,uk_price_paid_bin,t) diff --git a/tsl/test/expected/dist_copy_long.out b/tsl/test/expected/dist_copy_long.out index a4a7fa666ca..5d2af08e2a9 100644 --- a/tsl/test/expected/dist_copy_long.out +++ b/tsl/test/expected/dist_copy_long.out @@ -8,6 +8,7 @@ \set DN_DBNAME_3 :TEST_DBNAME _3 SELECT 1 FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); +WARNING: adding data node is deprecated ?column? ---------- 1 @@ -15,6 +16,7 @@ SELECT 1 FROM add_data_node('data_node_1', host => 'localhost', SELECT 1 FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); +WARNING: adding data node is deprecated ?column? ---------- 1 @@ -22,6 +24,7 @@ SELECT 1 FROM add_data_node('data_node_2', host => 'localhost', SELECT 1 FROM add_data_node('data_node_3', host => 'localhost', database => :'DN_DBNAME_3'); +WARNING: adding data node is deprecated ?column? 
---------- 1 @@ -43,6 +46,7 @@ SET ROLE :ROLE_1; create table uk_price_paid(price integer, "date" date, postcode1 text, postcode2 text, type smallint, is_new bool, duration smallint, addr1 text, addr2 text, street text, locality text, town text, district text, country text, category smallint); -- Aim to about 100 partitions, the data is from 1995 to 2022. select create_distributed_hypertable('uk_price_paid', 'date', chunk_time_interval => interval '270 day'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "date" create_distributed_hypertable ------------------------------- @@ -51,6 +55,7 @@ NOTICE: adding not-null constraint to column "date" create table uk_price_paid_space2(like uk_price_paid); select create_distributed_hypertable('uk_price_paid_space2', 'date', 'postcode2', 2, chunk_time_interval => interval '270 day'); +WARNING: distributed hypertable is deprecated WARNING: insufficient number of partitions for dimension "postcode2" create_distributed_hypertable ----------------------------------- @@ -59,6 +64,7 @@ WARNING: insufficient number of partitions for dimension "postcode2" create table uk_price_paid_space10(like uk_price_paid); select create_distributed_hypertable('uk_price_paid_space10', 'date', 'postcode2', 10, chunk_time_interval => interval '270 day'); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------------ (3,public,uk_price_paid_space10,t) @@ -144,6 +150,7 @@ select count(*) from uk_price_paid; create table uk_price_paid_r2(like uk_price_paid); select create_distributed_hypertable('uk_price_paid_r2', 'date', 'postcode2', chunk_time_interval => interval '90 day', replication_factor => 2); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (4,public,uk_price_paid_r2,t) @@ -152,6 +159,7 @@ select create_distributed_hypertable('uk_price_paid_r2', 'date', 'postcode2', create table uk_price_paid_r3(like uk_price_paid); select create_distributed_hypertable('uk_price_paid_r3', 'date', 'postcode2', chunk_time_interval => interval '90 day', replication_factor => 3); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (5,public,uk_price_paid_r3,t) diff --git a/tsl/test/expected/dist_ddl.out b/tsl/test/expected/dist_ddl.out index bb366e7ec91..7cfa0428a7c 100644 --- a/tsl/test/expected/dist_ddl.out +++ b/tsl/test/expected/dist_ddl.out @@ -16,6 +16,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ---------------+---------------+--------------+------------------+------------------- db_dist_ddl_1 | db_dist_ddl_1 | t | t | t @@ -35,6 +38,7 @@ CREATE TABLE disttable(time timestamptz, device int, color int CONSTRAINT color_ CREATE UNIQUE INDEX disttable_pk ON disttable(time, temp); -- CREATE TABLE SELECT * FROM create_distributed_hypertable('disttable', 'time', 'temp', replication_factor => 3); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -76,6 +80,7 @@ SELECT * FROM test.show_constraints('disttable'); SELECT * 
FROM test.show_indexes('disttable'); SELECT * FROM test.show_triggers('disttable'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_columns('disttable') NOTICE: [db_dist_ddl_1]: @@ -214,6 +219,7 @@ SELECT * FROM test.show_constraints('disttable'); (2 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_constraints('disttable') $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_constraints('disttable') NOTICE: [db_dist_ddl_1]: Constraint |Type|Columns |Index|Expr |Deferrable|Deferred|Validated @@ -255,6 +261,7 @@ SELECT * FROM test.show_constraints('disttable'); (1 row) SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_constraints('disttable') $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_constraints('disttable') NOTICE: [db_dist_ddl_1]: Constraint |Type|Columns|Index|Expr |Deferrable|Deferred|Validated @@ -287,6 +294,7 @@ color_check|c |{color}|- |(color > 0)|f |f |t -- DROP CONSTRAINT pre-created ALTER TABLE disttable DROP CONSTRAINT color_check; SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_constraints('disttable') $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_constraints('disttable') NOTICE: [db_dist_ddl_1]: Constraint|Type|Columns|Index|Expr|Deferrable|Deferred|Validated @@ -316,6 +324,7 @@ Constraint|Type|Columns|Index|Expr|Deferrable|Deferred|Validated -- DROP COLUMN ALTER TABLE disttable DROP COLUMN color; SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_columns('disttable') $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_columns('disttable') NOTICE: [db_dist_ddl_1]: Column|Type |NotNull @@ -354,6 +363,7 @@ temp |double precision |f -- ADD COLUMN ALTER TABLE disttable ADD COLUMN description text; SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_columns('disttable') $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_columns('disttable') NOTICE: [db_dist_ddl_1]: Column |Type |NotNull @@ -395,6 +405,7 @@ description|text |f -- CREATE INDEX CREATE INDEX disttable_description_idx ON disttable (description); SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('disttable') $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_indexes('disttable') NOTICE: [db_dist_ddl_1]: Index |Columns |Expr|Unique|Primary|Exclusion|Tablespace @@ -503,6 +514,7 @@ SELECT tableowner FROM pg_tables WHERE tablename = 'disttable'; SELECT * FROM test.remote_exec(NULL, $$ SELECT tableowner FROM pg_tables WHERE tablename = 'disttable'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT tableowner FROM pg_tables WHERE tablename = 'disttable' NOTICE: [db_dist_ddl_1]: @@ -602,6 +614,7 @@ $$ SELECT relname, relreplident FROM pg_class WHERE relname = 'disttable' ORDER BY relname; SELECT relname, relreplident FROM show_chunks('disttable') ch INNER JOIN pg_class c ON (ch = c.oid) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT relname, relreplident FROM pg_class WHERE relname = 'disttable' ORDER BY relname NOTICE: [db_dist_ddl_1]: @@ -679,6 +692,7 @@ $$ SELECT relname, relreplident FROM pg_class WHERE relname = 'disttable' ORDER BY relname; SELECT relname, 
relreplident FROM show_chunks('disttable') ch INNER JOIN pg_class c ON (ch = c.oid) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT relname, relreplident FROM pg_class WHERE relname = 'disttable' ORDER BY relname NOTICE: [db_dist_ddl_1]: @@ -764,6 +778,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ (SELECT "Column" AS col FROM test.show_columns(chunk.relid) WHERE "Column"='descr') FROM (SELECT "Child" AS relid FROM test.show_subtables('disttable') LIMIT 1) chunk $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT chunk.relid AS chunk_relid, (SELECT "Column" AS col FROM test.show_columns(chunk.relid) WHERE "Column"='descr') @@ -796,6 +811,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ (SELECT "Constraint" AS constr FROM test.show_constraints(chunk.relid) WHERE "Constraint"='device_chk') FROM (SELECT "Child" AS relid FROM test.show_subtables('disttable') LIMIT 1) chunk $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT chunk.relid AS chunk_relid, (SELECT "Constraint" AS constr FROM test.show_constraints(chunk.relid) WHERE "Constraint"='device_chk') @@ -826,6 +842,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ SELECT chunk.relid AS chunk_relid, (test.show_indexes(chunk.relid)).* FROM (SELECT "Child" AS relid FROM test.show_subtables('disttable') LIMIT 1) chunk $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT chunk.relid AS chunk_relid, (test.show_indexes(chunk.relid)).* FROM (SELECT "Child" AS relid FROM test.show_subtables('disttable') LIMIT 1) chunk @@ -901,6 +918,7 @@ SELECT * FROM test.show_indexes('disttable'); (2 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('disttable') $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_indexes('disttable') NOTICE: [db_dist_ddl_1]: Index |Columns |Expr|Unique|Primary|Exclusion|Tablespace @@ -936,6 +954,7 @@ disttable_time_idx |{time} | |f |f |f | -- DROP TABLE DROP TABLE disttable; SELECT * FROM test.remote_exec(NULL, $$ SELECT 1 FROM pg_tables WHERE tablename = 'disttable' $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT 1 FROM pg_tables WHERE tablename = 'disttable' NOTICE: [db_dist_ddl_1]: ?column? 
@@ -974,6 +993,7 @@ FROM pg_catalog.pg_namespace s JOIN pg_catalog.pg_user u ON u.usesysid = s.nspowner WHERE s.nspname = 'dist_schema'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT s.nspname, u.usename FROM pg_catalog.pg_namespace s @@ -1024,6 +1044,7 @@ NOTICE: adding not-null constraint to column "time" (1 row) SELECT * FROM test.remote_exec(NULL, $$ SELECT schemaname, tablename FROM pg_tables WHERE tablename = 'some_dist_table' $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT schemaname, tablename FROM pg_tables WHERE tablename = 'some_dist_table' NOTICE: [db_dist_ddl_1]: schemaname |tablename @@ -1057,6 +1078,7 @@ dist_schema|some_dist_table DROP SCHEMA dist_schema CASCADE; NOTICE: drop cascades to table dist_schema.some_dist_table SELECT * FROM test.remote_exec(NULL, $$ SELECT schemaname, tablename FROM pg_tables WHERE tablename = 'some_dist_table' $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT schemaname, tablename FROM pg_tables WHERE tablename = 'some_dist_table' NOTICE: [db_dist_ddl_1]: schemaname|tablename @@ -1090,6 +1112,7 @@ FROM pg_catalog.pg_namespace s JOIN pg_catalog.pg_user u ON u.usesysid = s.nspowner WHERE s.nspname = 'dist_schema'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT s.nspname, u.usename FROM pg_catalog.pg_namespace s @@ -1136,6 +1159,7 @@ FROM pg_catalog.pg_namespace s JOIN pg_catalog.pg_user u ON u.usesysid = s.nspowner WHERE s.nspname = 'dist_schema_2'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT s.nspname, u.usename FROM pg_catalog.pg_namespace s @@ -1184,6 +1208,7 @@ FROM pg_catalog.pg_namespace s JOIN pg_catalog.pg_user u ON u.usesysid = s.nspowner WHERE s.nspname = 'dist_schema_2'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT s.nspname, u.usename FROM pg_catalog.pg_namespace s @@ -1232,6 +1257,7 @@ FROM pg_catalog.pg_namespace s JOIN pg_catalog.pg_user u ON u.usesysid = s.nspowner WHERE s.nspname = 'dist_schema_3'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT s.nspname, u.usename FROM pg_catalog.pg_namespace s @@ -1282,6 +1308,7 @@ FROM pg_catalog.pg_namespace s JOIN pg_catalog.pg_user u ON u.usesysid = s.nspowner WHERE s.nspname = 'dist_schema_3'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT s.nspname, u.usename FROM pg_catalog.pg_namespace s @@ -1332,6 +1359,7 @@ NOTICE: adding not-null constraint to column "time" ALTER SCHEMA dist_schema RENAME TO dist_schema_2; SELECT * FROM test.remote_exec(NULL, $$ SELECT schemaname, tablename FROM pg_tables WHERE tablename = 'some_dist_table' $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT schemaname, tablename FROM pg_tables WHERE tablename = 'some_dist_table' NOTICE: [db_dist_ddl_1]: schemaname |tablename @@ -1369,6 +1397,7 @@ FROM pg_catalog.pg_namespace s JOIN pg_catalog.pg_user u ON u.usesysid = s.nspowner WHERE s.nspname = 'dist_schema_2'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT s.nspname, u.usename FROM pg_catalog.pg_namespace s @@ -1423,6 +1452,7 @@ NOTICE: adding not-null constraint to column "time" CREATE INDEX some_dist_device_idx ON some_dist_table (device); SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('some_dist_table') $$); +WARNING: executing remote command is deprecated 
NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_indexes('some_dist_table') NOTICE: [db_dist_ddl_1]: Index |Columns |Expr|Unique|Primary|Exclusion|Tablespace @@ -1457,6 +1487,7 @@ some_dist_table_time_idx|{time} | |f |f |f | ALTER TABLE some_dist_table DROP COLUMN device; SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('some_dist_table') $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_indexes('some_dist_table') NOTICE: [db_dist_ddl_1]: Index |Columns|Expr|Unique|Primary|Exclusion|Tablespace @@ -1522,6 +1553,7 @@ FROM pg_catalog.pg_namespace s JOIN pg_catalog.pg_user u ON u.usesysid = s.nspowner WHERE s.nspname = 'schema_global'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT s.nspname, u.usename FROM pg_catalog.pg_namespace s @@ -1585,6 +1617,7 @@ FROM pg_catalog.pg_namespace s JOIN pg_catalog.pg_user u ON u.usesysid = s.nspowner WHERE s.nspname = 'schema_global'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT s.nspname, u.usename FROM pg_catalog.pg_namespace s @@ -1647,6 +1680,7 @@ SELECT * FROM test.show_indexes('some_dist_table'); (2 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('some_dist_table') $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_indexes('some_dist_table') NOTICE: [db_dist_ddl_1]: Index |Columns |Expr|Unique|Primary|Exclusion|Tablespace @@ -1699,6 +1733,7 @@ SELECT * FROM test.show_indexes('some_dist_table'); (1 row) SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('some_dist_table') $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_indexes('some_dist_table') NOTICE: [db_dist_ddl_1]: Index |Columns|Expr|Unique|Primary|Exclusion|Tablespace @@ -1733,6 +1768,7 @@ DROP TABLE some_dist_table; BEGIN; CREATE TABLE some_dist_table(time timestamptz, device int); SELECT * FROM create_distributed_hypertable('some_dist_table', 'time'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+-----------------+--------- @@ -1779,6 +1815,7 @@ SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('some_dist_table'); SELECT * FROM test.show_constraints('some_dist_table'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_indexes('some_dist_table') NOTICE: [db_dist_ddl_1]: @@ -1870,6 +1907,7 @@ SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('some_dist_table'); SELECT * FROM test.show_constraints('some_dist_table'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_indexes('some_dist_table') NOTICE: [db_dist_ddl_1]: @@ -1960,6 +1998,7 @@ SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('some_dist_table'); SELECT * FROM test.show_constraints('some_dist_table'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_indexes('some_dist_table') NOTICE: [db_dist_ddl_1]: @@ -2055,6 +2094,7 @@ SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('some_dist_table'); SELECT * FROM test.show_constraints('some_dist_table'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM 
test.show_indexes('some_dist_table') NOTICE: [db_dist_ddl_1]: @@ -2146,6 +2186,7 @@ SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('some_dist_table'); SELECT * FROM test.show_constraints('some_dist_table'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_indexes('some_dist_table') NOTICE: [db_dist_ddl_1]: @@ -2235,6 +2276,7 @@ SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('some_dist_table'); SELECT * FROM test.show_constraints('some_dist_table'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_indexes('some_dist_table') NOTICE: [db_dist_ddl_1]: @@ -2328,6 +2370,7 @@ SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('some_dist_table'); SELECT * FROM test.show_constraints('some_dist_table'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * FROM test.show_indexes('some_dist_table') NOTICE: [db_dist_ddl_1]: @@ -2435,6 +2478,7 @@ SELECT * FROM test.show_constraints('disttable'); SELECT (test.show_constraints(chunk)).* FROM show_chunks('disttable') AS chunk; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT show_chunks('disttable') NOTICE: [db_dist_ddl_1]: @@ -2667,6 +2711,7 @@ DROP TABLE disttable; -- without enabling timescaledb.enable_client_ddl_on_data_nodes guc CREATE TABLE disttable(time timestamptz NOT NULL, device int); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', replication_factor => 3); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 19 | public | disttable | t @@ -2685,6 +2730,7 @@ DROP TABLE disttable; -- CREATE TABLE hyper(time TIMESTAMPTZ, device INT, temp FLOAT); SELECT create_distributed_hypertable('hyper', 'time', 'device', 4, chunk_time_interval => interval '18 hours', replication_factor => 1, data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -2702,6 +2748,7 @@ DROP TABLE hyper; -- CREATE TABLE disttable(time timestamptz NOT NULL, device int); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 21 | public | disttable | t @@ -2948,6 +2995,7 @@ SET timescaledb.hypertable_distributed_default TO 'auto'; SET timescaledb.hypertable_replication_factor_default TO 3; CREATE TABLE drf_test(time TIMESTAMPTZ NOT NULL); SELECT create_distributed_hypertable('drf_test', 'time'); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (33,public,drf_test,t) @@ -2965,6 +3013,7 @@ DROP TABLE drf_test; -- test directly on a data node first CREATE TABLE dist_test(time timestamptz NOT NULL, device int, temp float); SELECT create_distributed_hypertable('dist_test', 'time', 'device', 3, replication_factor => 3); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (34,public,dist_test,t) @@ -2982,6 +3031,7 @@ SELECT * from show_chunks('dist_test'); (5 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +WARNING: executing remote 
command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * from show_chunks('dist_test') NOTICE: [db_dist_ddl_1]: show_chunks @@ -3131,6 +3181,7 @@ DROP TABLE dist_test; -- test from access node CREATE TABLE dist_test(time timestamptz NOT NULL, device int, temp float); SELECT create_distributed_hypertable('dist_test', 'time', 'device', 3, replication_factor => 3); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (35,public,dist_test,t) @@ -3148,6 +3199,7 @@ SELECT * from show_chunks('dist_test'); (5 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * from show_chunks('dist_test') NOTICE: [db_dist_ddl_1]: show_chunks @@ -3208,6 +3260,7 @@ SELECT * from show_chunks('dist_test'); (3 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * from show_chunks('dist_test') NOTICE: [db_dist_ddl_1]: show_chunks @@ -3277,6 +3330,7 @@ SELECT * from show_chunks('dist_test'); (3 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * from show_chunks('dist_test') NOTICE: [db_dist_ddl_1]: show_chunks @@ -3348,6 +3402,7 @@ SELECT * from show_chunks('dist_test'); (2 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * from show_chunks('dist_test') NOTICE: [db_dist_ddl_1]: show_chunks @@ -3423,6 +3478,7 @@ DROP TABLE dist_test; -- CREATE TABLE dist_test(time timestamptz NOT NULL, device int, temp float); SELECT create_distributed_hypertable('dist_test', 'time', 'device', 3, replication_factor => 3); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (36,public,dist_test,t) @@ -3440,6 +3496,7 @@ SELECT * from show_chunks('dist_test'); (5 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_ddl_1]: SELECT * from show_chunks('dist_test') NOTICE: [db_dist_ddl_1]: show_chunks @@ -3482,6 +3539,7 @@ _timescaledb_internal._dist_hyper_36_45_chunk (1 row) SELECT alter_data_node(:'DATA_NODE_1', available => false); +WARNING: altering data node is deprecated alter_data_node ------------------------------------------------- (db_dist_ddl_1,localhost,55432,db_dist_ddl_1,f) @@ -3514,6 +3572,7 @@ SELECT * from show_chunks('dist_test'); \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; -- drop stale chunks by making data node available SELECT alter_data_node(:'DATA_NODE_1', available => true); +WARNING: altering data node is deprecated alter_data_node ------------------------------------------------- (db_dist_ddl_1,localhost,55432,db_dist_ddl_1,t) diff --git a/tsl/test/expected/dist_grant-13.out b/tsl/test/expected/dist_grant-13.out index d39816494ac..62ca1d83cf5 100644 --- a/tsl/test/expected/dist_grant-13.out +++ b/tsl/test/expected/dist_grant-13.out @@ -14,6 +14,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated 
node_name | database | node_created | database_created | extension_created -----------------+-----------------+--------------+------------------+------------------- db_dist_grant_1 | db_dist_grant_1 | t | t | t @@ -31,6 +34,7 @@ SELECT relname, relacl FROM pg_class WHERE relname = 'conditions'; (1 row) SELECT * FROM create_distributed_hypertable('conditions', 'time', 'device'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 1 | public | conditions | t @@ -49,6 +53,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'DELETE') AS "DELETE" , has_table_privilege('%s', 'conditions', 'INSERT') AS "INSERT"; $$, :'ROLE_1', :'ROLE_1', :'ROLE_1')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_1', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_1', 'conditions', 'DELETE') AS "DELETE" @@ -100,6 +105,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'DELETE') AS "DELETE" , has_table_privilege('%s', 'conditions', 'INSERT') AS "INSERT"; $$, :'ROLE_2', :'ROLE_2', :'ROLE_2')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" @@ -172,6 +178,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'UPDATE') AS "UPDATE" , has_table_privilege('%s', 'conditions', 'TRUNCATE') AS "TRUNCATE"; $$, :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" @@ -219,6 +226,7 @@ f |t |t |t |f -- Add another data node and check that grants are propagated when the -- data node is attached to an existing table. 
SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data4', host => 'localhost', database => :'DATA_NODE_4'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------+-----------------+--------------+------------------+------------------- data4 | db_dist_grant_4 | t | t | t @@ -232,6 +240,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'UPDATE') AS "UPDATE" , has_table_privilege('%s', 'conditions', 'TRUNCATE') AS "TRUNCATE"; $$, :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" @@ -280,6 +289,7 @@ NOTICE: [data4]: ERROR: [data4]: relation "conditions" does not exist \set ON_ERROR_STOP 1 SELECT * FROM attach_data_node('data4', 'conditions'); +WARNING: attaching data node is deprecated NOTICE: the number of partitions in dimension "device" was increased to 4 hypertable_id | node_hypertable_id | node_name ---------------+--------------------+----------- @@ -296,6 +306,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'UPDATE') AS "UPDATE" , has_table_privilege('%s', 'conditions', 'TRUNCATE') AS "TRUNCATE"; $$, :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" @@ -360,6 +371,7 @@ CREATE TABLE no_grants(time TIMESTAMPTZ NOT NULL, device INTEGER, temperature FL GRANT SELECT ON no_grants TO :ROLE_1; -- First case is when table is created. Grants should not be propagated. 
SELECT * FROM create_distributed_hypertable('no_grants', 'time', 'device'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 2 | public | no_grants | t @@ -378,6 +390,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'no_grants', 'DELETE') AS "DELETE" , has_table_privilege('%s', 'no_grants', 'INSERT') AS "INSERT"; $$, :'ROLE_1', :'ROLE_1', :'ROLE_1')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" @@ -443,6 +456,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'no_grants', 'DELETE') AS "DELETE" , has_table_privilege('%s', 'no_grants', 'INSERT') AS "INSERT"; $$, :'ROLE_1', :'ROLE_1', :'ROLE_1')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" @@ -675,6 +689,7 @@ CREATE TABLE conditions( PRIMARY KEY (time,device) ); SELECT * FROM create_distributed_hypertable('conditions', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated WARNING: insufficient number of partitions for dimension "device" hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -900,6 +915,7 @@ CREATE TABLE measurements( ); -- Create a distributed hypertable with chunks in the same schema SELECT * FROM create_distributed_hypertable('public.measurements', 'time', chunk_time_interval => '5 days'::interval, associated_schema_name => 'public'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+--------------+--------- 5 | public | measurements | t @@ -1160,6 +1176,7 @@ CREATE TABLE disttable_role_3(time timestamptz, device int, temp float); \set ON_ERROR_STOP 0 -- Can't create distributed hypertable without GRANTs on foreign servers (data nodes) SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated ERROR: permission denied for foreign server db_dist_grant_1 \set ON_ERROR_STOP 1 -- Grant USAGE on DATA_NODE_1 (but it is not enough) @@ -1169,6 +1186,7 @@ GRANT CREATE ON SCHEMA public TO :ROLE_3; SET ROLE :ROLE_3; \set ON_ERROR_STOP 0 SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated ERROR: permission denied for foreign server db_dist_grant_2 \set ON_ERROR_STOP 1 -- Creating the hypertable should work with GRANTs on both servers. @@ -1180,6 +1198,7 @@ SET ROLE :ROLE_3; -- Still cannot connect since there is no password in the passfile and -- no user mapping. SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" ERROR: could not connect to "db_dist_grant_1" \set ON_ERROR_STOP 1 @@ -1190,6 +1209,7 @@ SET ROLE :ROLE_3; -- Still cannot connect since there is only a user mapping for data -- node DATA_NODE_1. 
SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" ERROR: could not connect to "db_dist_grant_2" \set ON_ERROR_STOP 1 @@ -1201,6 +1221,7 @@ SET ROLE :ROLE_3; -- User should be able to connect and create the distributed -- hypertable at this point. SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+------------------+--------- @@ -1241,6 +1262,7 @@ ALTER DEFAULT PRIVILEGES GRANT INSERT ON TABLES TO :ROLE_1; SELECT FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl @@ -1265,6 +1287,7 @@ ALTER DEFAULT PRIVILEGES REVOKE INSERT ON TABLES FROM :ROLE_1; SELECT FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl diff --git a/tsl/test/expected/dist_grant-14.out b/tsl/test/expected/dist_grant-14.out index 810a9be4253..7f80b8dcb8f 100644 --- a/tsl/test/expected/dist_grant-14.out +++ b/tsl/test/expected/dist_grant-14.out @@ -14,6 +14,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------+-----------------+--------------+------------------+------------------- db_dist_grant_1 | db_dist_grant_1 | t | t | t @@ -31,6 +34,7 @@ SELECT relname, relacl FROM pg_class WHERE relname = 'conditions'; (1 row) SELECT * FROM create_distributed_hypertable('conditions', 'time', 'device'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 1 | public | conditions | t @@ -49,6 +53,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'DELETE') AS "DELETE" , has_table_privilege('%s', 'conditions', 'INSERT') AS "INSERT"; $$, :'ROLE_1', :'ROLE_1', :'ROLE_1')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_1', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_1', 'conditions', 'DELETE') AS "DELETE" @@ -100,6 +105,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'DELETE') AS "DELETE" , has_table_privilege('%s', 'conditions', 'INSERT') AS "INSERT"; $$, :'ROLE_2', :'ROLE_2', :'ROLE_2')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" @@ -172,6 +178,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , 
has_table_privilege('%s', 'conditions', 'UPDATE') AS "UPDATE" , has_table_privilege('%s', 'conditions', 'TRUNCATE') AS "TRUNCATE"; $$, :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" @@ -219,6 +226,7 @@ f |t |t |t |f -- Add another data node and check that grants are propagated when the -- data node is attached to an existing table. SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data4', host => 'localhost', database => :'DATA_NODE_4'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------+-----------------+--------------+------------------+------------------- data4 | db_dist_grant_4 | t | t | t @@ -232,6 +240,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'UPDATE') AS "UPDATE" , has_table_privilege('%s', 'conditions', 'TRUNCATE') AS "TRUNCATE"; $$, :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" @@ -280,6 +289,7 @@ NOTICE: [data4]: ERROR: [data4]: relation "conditions" does not exist \set ON_ERROR_STOP 1 SELECT * FROM attach_data_node('data4', 'conditions'); +WARNING: attaching data node is deprecated NOTICE: the number of partitions in dimension "device" was increased to 4 hypertable_id | node_hypertable_id | node_name ---------------+--------------------+----------- @@ -296,6 +306,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'UPDATE') AS "UPDATE" , has_table_privilege('%s', 'conditions', 'TRUNCATE') AS "TRUNCATE"; $$, :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" @@ -360,6 +371,7 @@ CREATE TABLE no_grants(time TIMESTAMPTZ NOT NULL, device INTEGER, temperature FL GRANT SELECT ON no_grants TO :ROLE_1; -- First case is when table is created. Grants should not be propagated. 
SELECT * FROM create_distributed_hypertable('no_grants', 'time', 'device'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 2 | public | no_grants | t @@ -378,6 +390,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'no_grants', 'DELETE') AS "DELETE" , has_table_privilege('%s', 'no_grants', 'INSERT') AS "INSERT"; $$, :'ROLE_1', :'ROLE_1', :'ROLE_1')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" @@ -443,6 +456,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'no_grants', 'DELETE') AS "DELETE" , has_table_privilege('%s', 'no_grants', 'INSERT') AS "INSERT"; $$, :'ROLE_1', :'ROLE_1', :'ROLE_1')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" @@ -675,6 +689,7 @@ CREATE TABLE conditions( PRIMARY KEY (time,device) ); SELECT * FROM create_distributed_hypertable('conditions', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated WARNING: insufficient number of partitions for dimension "device" hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -900,6 +915,7 @@ CREATE TABLE measurements( ); -- Create a distributed hypertable with chunks in the same schema SELECT * FROM create_distributed_hypertable('public.measurements', 'time', chunk_time_interval => '5 days'::interval, associated_schema_name => 'public'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+--------------+--------- 5 | public | measurements | t @@ -1164,6 +1180,7 @@ CREATE TABLE disttable_role_3(time timestamptz, device int, temp float); \set ON_ERROR_STOP 0 -- Can't create distributed hypertable without GRANTs on foreign servers (data nodes) SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated ERROR: permission denied for foreign server db_dist_grant_1 \set ON_ERROR_STOP 1 -- Grant USAGE on DATA_NODE_1 (but it is not enough) @@ -1173,6 +1190,7 @@ GRANT CREATE ON SCHEMA public TO :ROLE_3; SET ROLE :ROLE_3; \set ON_ERROR_STOP 0 SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated ERROR: permission denied for foreign server db_dist_grant_2 \set ON_ERROR_STOP 1 -- Creating the hypertable should work with GRANTs on both servers. @@ -1184,6 +1202,7 @@ SET ROLE :ROLE_3; -- Still cannot connect since there is no password in the passfile and -- no user mapping. SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" ERROR: could not connect to "db_dist_grant_1" \set ON_ERROR_STOP 1 @@ -1194,6 +1213,7 @@ SET ROLE :ROLE_3; -- Still cannot connect since there is only a user mapping for data -- node DATA_NODE_1. 
SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" ERROR: could not connect to "db_dist_grant_2" \set ON_ERROR_STOP 1 @@ -1205,6 +1225,7 @@ SET ROLE :ROLE_3; -- User should be able to connect and create the distributed -- hypertable at this point. SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+------------------+--------- @@ -1245,6 +1266,7 @@ ALTER DEFAULT PRIVILEGES GRANT INSERT ON TABLES TO :ROLE_1; SELECT FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl @@ -1269,6 +1291,7 @@ ALTER DEFAULT PRIVILEGES REVOKE INSERT ON TABLES FROM :ROLE_1; SELECT FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl diff --git a/tsl/test/expected/dist_grant-15.out b/tsl/test/expected/dist_grant-15.out index 810a9be4253..7f80b8dcb8f 100644 --- a/tsl/test/expected/dist_grant-15.out +++ b/tsl/test/expected/dist_grant-15.out @@ -14,6 +14,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------+-----------------+--------------+------------------+------------------- db_dist_grant_1 | db_dist_grant_1 | t | t | t @@ -31,6 +34,7 @@ SELECT relname, relacl FROM pg_class WHERE relname = 'conditions'; (1 row) SELECT * FROM create_distributed_hypertable('conditions', 'time', 'device'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 1 | public | conditions | t @@ -49,6 +53,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'DELETE') AS "DELETE" , has_table_privilege('%s', 'conditions', 'INSERT') AS "INSERT"; $$, :'ROLE_1', :'ROLE_1', :'ROLE_1')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_1', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_1', 'conditions', 'DELETE') AS "DELETE" @@ -100,6 +105,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'DELETE') AS "DELETE" , has_table_privilege('%s', 'conditions', 'INSERT') AS "INSERT"; $$, :'ROLE_2', :'ROLE_2', :'ROLE_2')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" @@ -172,6 +178,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , 
has_table_privilege('%s', 'conditions', 'UPDATE') AS "UPDATE" , has_table_privilege('%s', 'conditions', 'TRUNCATE') AS "TRUNCATE"; $$, :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" @@ -219,6 +226,7 @@ f |t |t |t |f -- Add another data node and check that grants are propagated when the -- data node is attached to an existing table. SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data4', host => 'localhost', database => :'DATA_NODE_4'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------+-----------------+--------------+------------------+------------------- data4 | db_dist_grant_4 | t | t | t @@ -232,6 +240,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'UPDATE') AS "UPDATE" , has_table_privilege('%s', 'conditions', 'TRUNCATE') AS "TRUNCATE"; $$, :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" @@ -280,6 +289,7 @@ NOTICE: [data4]: ERROR: [data4]: relation "conditions" does not exist \set ON_ERROR_STOP 1 SELECT * FROM attach_data_node('data4', 'conditions'); +WARNING: attaching data node is deprecated NOTICE: the number of partitions in dimension "device" was increased to 4 hypertable_id | node_hypertable_id | node_name ---------------+--------------------+----------- @@ -296,6 +306,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'conditions', 'UPDATE') AS "UPDATE" , has_table_privilege('%s', 'conditions', 'TRUNCATE') AS "TRUNCATE"; $$, :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2', :'ROLE_2')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_2', 'conditions', 'SELECT') AS "SELECT" , has_table_privilege('test_role_2', 'conditions', 'DELETE') AS "DELETE" @@ -360,6 +371,7 @@ CREATE TABLE no_grants(time TIMESTAMPTZ NOT NULL, device INTEGER, temperature FL GRANT SELECT ON no_grants TO :ROLE_1; -- First case is when table is created. Grants should not be propagated. 
SELECT * FROM create_distributed_hypertable('no_grants', 'time', 'device'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 2 | public | no_grants | t @@ -378,6 +390,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'no_grants', 'DELETE') AS "DELETE" , has_table_privilege('%s', 'no_grants', 'INSERT') AS "INSERT"; $$, :'ROLE_1', :'ROLE_1', :'ROLE_1')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" @@ -443,6 +456,7 @@ SELECT * FROM test.remote_exec(NULL, format($$ , has_table_privilege('%s', 'no_grants', 'DELETE') AS "DELETE" , has_table_privilege('%s', 'no_grants', 'INSERT') AS "INSERT"; $$, :'ROLE_1', :'ROLE_1', :'ROLE_1')); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT has_table_privilege('test_role_1', 'no_grants', 'SELECT') AS "SELECT" , has_table_privilege('test_role_1', 'no_grants', 'DELETE') AS "DELETE" @@ -675,6 +689,7 @@ CREATE TABLE conditions( PRIMARY KEY (time,device) ); SELECT * FROM create_distributed_hypertable('conditions', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated WARNING: insufficient number of partitions for dimension "device" hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -900,6 +915,7 @@ CREATE TABLE measurements( ); -- Create a distributed hypertable with chunks in the same schema SELECT * FROM create_distributed_hypertable('public.measurements', 'time', chunk_time_interval => '5 days'::interval, associated_schema_name => 'public'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+--------------+--------- 5 | public | measurements | t @@ -1164,6 +1180,7 @@ CREATE TABLE disttable_role_3(time timestamptz, device int, temp float); \set ON_ERROR_STOP 0 -- Can't create distributed hypertable without GRANTs on foreign servers (data nodes) SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated ERROR: permission denied for foreign server db_dist_grant_1 \set ON_ERROR_STOP 1 -- Grant USAGE on DATA_NODE_1 (but it is not enough) @@ -1173,6 +1190,7 @@ GRANT CREATE ON SCHEMA public TO :ROLE_3; SET ROLE :ROLE_3; \set ON_ERROR_STOP 0 SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated ERROR: permission denied for foreign server db_dist_grant_2 \set ON_ERROR_STOP 1 -- Creating the hypertable should work with GRANTs on both servers. @@ -1184,6 +1202,7 @@ SET ROLE :ROLE_3; -- Still cannot connect since there is no password in the passfile and -- no user mapping. SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" ERROR: could not connect to "db_dist_grant_1" \set ON_ERROR_STOP 1 @@ -1194,6 +1213,7 @@ SET ROLE :ROLE_3; -- Still cannot connect since there is only a user mapping for data -- node DATA_NODE_1. 
SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" ERROR: could not connect to "db_dist_grant_2" \set ON_ERROR_STOP 1 @@ -1205,6 +1225,7 @@ SET ROLE :ROLE_3; -- User should be able to connect and create the distributed -- hypertable at this point. SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nodes => ARRAY[:'DATA_NODE_1',:'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+------------------+--------- @@ -1245,6 +1266,7 @@ ALTER DEFAULT PRIVILEGES GRANT INSERT ON TABLES TO :ROLE_1; SELECT FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl @@ -1269,6 +1291,7 @@ ALTER DEFAULT PRIVILEGES REVOKE INSERT ON TABLES FROM :ROLE_1; SELECT FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_grant_1]: SELECT defaclrole::regrole, defaclobjtype, defaclacl FROM pg_default_acl diff --git a/tsl/test/expected/dist_hypertable-13.out b/tsl/test/expected/dist_hypertable-13.out index be1a9b262b4..51d8eae846b 100644 --- a/tsl/test/expected/dist_hypertable-13.out +++ b/tsl/test/expected/dist_hypertable-13.out @@ -21,6 +21,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------------+----------------------+--------------+------------------+------------------- db_dist_hypertable_1 | db_dist_hypertable_1 | t | t | t @@ -62,6 +65,7 @@ ERROR: operation not supported -- constraint to test how those work CREATE TABLE disttable(time timestamptz, device int CHECK (device > 0), color int, temp float, PRIMARY KEY (time,device)); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 1); +WARNING: distributed hypertable is deprecated WARNING: insufficient number of partitions for dimension "device" hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -136,6 +140,7 @@ ERROR: replication factor too large for hypertable "underreplicated" RESET ROLE; SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node(:'DATA_NODE_4', host => 'localhost', database => :'DATA_NODE_4'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------------+----------------------+--------------+------------------+------------------- db_dist_hypertable_4 | db_dist_hypertable_4 | t | t | t @@ -174,6 +179,7 @@ SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; (8 rows) SELECT attach_data_node(:'DATA_NODE_4', 'disttable', repartition => false); +WARNING: attaching data node is deprecated attach_data_node ---------------------------- 
(1,2,db_dist_hypertable_4) @@ -195,6 +201,7 @@ SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; --create new session to clear out connections \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; SELECT * FROM delete_data_node(:'DATA_NODE_4', force => true, drop_database => true, repartition => false); +WARNING: deleting data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "underreplicated" delete_data_node ------------------ @@ -516,6 +523,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable') @@ -557,6 +565,7 @@ chunk_id|hypertable_id|schema_name |table_name |relkind|slice SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM disttable; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM disttable NOTICE: [db_dist_hypertable_1]: @@ -1424,6 +1433,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable') @@ -1469,6 +1479,7 @@ chunk_id|hypertable_id|schema_name |table_name |relkind|slice SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM disttable; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM disttable NOTICE: [db_dist_hypertable_1]: @@ -1576,6 +1587,7 @@ ERROR: ON CONFLICT DO UPDATE not supported on distributed hypertables SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ INSERT INTO disttable VALUES ('2019-01-02 12:34', 1, 2, 9.3) $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: INSERT INTO disttable VALUES ('2019-01-02 12:34', 1, 2, 9.3) @@ -1585,6 +1597,7 @@ ERROR: [db_dist_hypertable_1]: distributed hypertable member cannot create chun SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ INSERT INTO disttable VALUES ('2017-09-03 06:09', 1, 2, 9.3) $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: INSERT INTO disttable VALUES ('2017-09-03 06:09', 1, 2, 9.3) @@ -1685,6 +1698,7 @@ SELECT * FROM disttable; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM disttable; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM disttable NOTICE: [db_dist_hypertable_1]: @@ -1758,6 +1772,7 @@ SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, SELECT * FROM show_chunks('disttable'); SELECT * FROM disttable; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk NOTICE: [db_dist_hypertable_1]: @@ -1878,6 +1893,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('underreplicated'); $$); +WARNING: executing 
remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('underreplicated') @@ -1916,6 +1932,7 @@ chunk_id|hypertable_id|schema_name |table_name |relkind|slic SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM underreplicated; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM underreplicated NOTICE: [db_dist_hypertable_1]: @@ -1970,6 +1987,7 @@ SELECT * FROM underreplicated; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM underreplicated; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM underreplicated NOTICE: [db_dist_hypertable_1]: @@ -2016,6 +2034,7 @@ RETURNING *; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM underreplicated; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM underreplicated NOTICE: [db_dist_hypertable_1]: @@ -2053,6 +2072,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_3'], $$ CREATE TABLE remotetable(time timestamptz PRIMARY KEY, id int, cost float); SELECT * FROM underreplicated; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_3]: CREATE TABLE remotetable(time timestamptz PRIMARY KEY, id int, cost float) NOTICE: [db_dist_hypertable_3]: @@ -2076,6 +2096,7 @@ ERROR: [db_dist_hypertable_3]: relation "remotetable" already exists -- Test distributed_hypertable creation fails with replication factor 0 CREATE TABLE remotetable2(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float); SELECT * FROM create_distributed_hypertable('remotetable2', 'time', replication_factor => 0); +WARNING: distributed hypertable is deprecated ERROR: invalid replication factor \set ON_ERROR_STOP 1 SELECT * FROM timescaledb_information.hypertables @@ -2102,6 +2123,7 @@ SELECT * FROM create_distributed_hypertable('"Table\\Schema"."Param_Table"', 'ti associated_schema_name => 'T3sTSch', associated_table_prefix => 'test*pre_', chunk_time_interval => interval '1 week', create_default_indexes => FALSE, if_not_exists => TRUE, replication_factor => 2, data_nodes => ARRAY[:'DATA_NODE_2', :'DATA_NODE_3']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time Col %#^#@$#" hypertable_id | schema_name | table_name | created ---------------+---------------+-------------+--------- @@ -2110,6 +2132,7 @@ NOTICE: adding not-null constraint to column "time Col %#^#@$#" -- Test detach and attach data node SELECT * FROM detach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"', force => true, drop_remote_data => true); +WARNING: detaching data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "Param_Table" NOTICE: the number of partitions in dimension "__region" of hypertable "Param_Table" was decreased to 1 detach_data_node @@ -2143,6 +2166,7 @@ ORDER BY 1, 2; (1 row) SELECT * FROM attach_data_node(:'DATA_NODE_1', '"Table\\Schema"."Param_Table"'); +WARNING: attaching data node is deprecated NOTICE: the number of partitions in dimension "__region" was increased to 2 hypertable_id | node_hypertable_id | node_name ---------------+--------------------+---------------------- @@ -2175,6 +2199,7 @@ ORDER BY 1, 2; -- Attach another data node but do not auto-repartition, i.e., -- 
increase the number of slices. SELECT * FROM attach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"', repartition => false); +WARNING: attaching data node is deprecated WARNING: insufficient number of partitions for dimension "__region" hypertable_id | node_hypertable_id | node_name ---------------+--------------------+---------------------- @@ -2231,6 +2256,7 @@ SELECT * FROM _timescaledb_catalog.dimension; SELECT t.tgname, t.tgtype, t.tgfoid::regproc FROM pg_trigger t, pg_class c WHERE c.relname = 'Param_Table' AND t.tgrelid = c.oid; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM _timescaledb_catalog.hypertable NOTICE: [db_dist_hypertable_1]: @@ -2340,6 +2366,7 @@ ts_insert_blocker| 7|_timescaledb_functions.insert_blocker -- Verify that repartitioning works as expected on detach_data_node SELECT * FROM detach_data_node(:'DATA_NODE_1', '"Table\\Schema"."Param_Table"', repartition => true); +WARNING: detaching data node is deprecated NOTICE: the number of partitions in dimension "__region" of hypertable "Param_Table" was decreased to 2 detach_data_node ------------------ @@ -2357,6 +2384,7 @@ AND h.table_name = 'Param_Table'; (2 rows) SELECT * FROM detach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"', force => true, repartition => false); +WARNING: detaching data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "Param_Table" detach_data_node ------------------ @@ -2377,6 +2405,7 @@ AND h.table_name = 'Param_Table'; -- should be propagated to backends. CREATE TABLE dimented_table (time timestamptz, column1 int, column2 timestamptz, column3 int); SELECT * FROM create_distributed_hypertable('dimented_table', 'time', partitioning_column => 'column1', number_partitions => 4, replication_factor => 1, data_nodes => ARRAY[:'DATA_NODE_1']); +WARNING: distributed hypertable is deprecated WARNING: only one data node was assigned to the hypertable NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created @@ -2455,6 +2484,7 @@ SELECT * FROM _timescaledb_catalog.dimension; (9 rows) SELECT * FROM attach_data_node(:'DATA_NODE_2', 'dimented_table'); +WARNING: attaching data node is deprecated hypertable_id | node_hypertable_id | node_name ---------------+--------------------+---------------------- 5 | 5 | db_dist_hypertable_2 @@ -2478,6 +2508,7 @@ SELECT * FROM _timescaledb_catalog.dimension; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM _timescaledb_catalog.dimension; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM _timescaledb_catalog.dimension NOTICE: [db_dist_hypertable_1]: @@ -3086,6 +3117,7 @@ SELECT * FROM twodim ORDER BY time; SELECT count(*) FROM twodim; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM twodim ORDER BY time @@ -3233,6 +3265,7 @@ SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct NOTICE: [db_dist_hypertable_1]: @@ -3270,6 +3303,7 @@ CREATE TABLE "disttable'quote"(time timestamptz, "device'quote" int, val float, SELECT 
public.create_distributed_hypertable( 'disttable''quote', 'time', 'device''quote', data_nodes => ARRAY[:'DATA_NODE_1'] ); +WARNING: distributed hypertable is deprecated WARNING: only one data node was assigned to the hypertable NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -3280,6 +3314,7 @@ NOTICE: adding not-null constraint to column "time" SET SCHEMA 'public'; CREATE TABLE disttable_drop_chunks(time timestamptz, device int CHECK (device > 0), color int, PRIMARY KEY (time,device)); SELECT * FROM create_distributed_hypertable('disttable_drop_chunks', 'time', 'device', number_partitions => 3, replication_factor => 2); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+-----------------------+--------- 10 | public | disttable_drop_chunks | t @@ -3312,6 +3347,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable_drop_chunks'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable_drop_chunks') @@ -3386,6 +3422,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable_drop_chunks'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable_drop_chunks') @@ -3440,6 +3477,7 @@ SELECT * FROM disttable_drop_chunks; CREATE TABLE "weird nAme\\#^."(time bigint, device int CHECK (device > 0), color int, PRIMARY KEY (time,device)); SELECT * FROM create_distributed_hypertable('"weird nAme\\#^."', 'time', 'device', 3, chunk_time_interval => 100, replication_factor => 2); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+-----------------+--------- 11 | public | weird nAme\\#^. 
| t @@ -3476,6 +3514,7 @@ SELECT * FROM "weird nAme\\#^."; DROP TABLE disttable CASCADE; CREATE TABLE disttable (time bigint, device int, temp float); SELECT create_distributed_hypertable('disttable', 'time', chunk_time_interval => 1000000::bigint); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -3487,6 +3526,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' @@ -3537,6 +3577,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' @@ -3603,6 +3644,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' @@ -3662,6 +3704,7 @@ SELECT detach_tablespaces('disttable'); -- Continue to use previously attached tablespace, but block attach/detach CREATE TABLE disttable2(time timestamptz, device int, temp float) TABLESPACE :TABLESPACE_1; SELECT create_distributed_hypertable('disttable2', 'time', chunk_time_interval => 1000000::bigint); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -3764,6 +3807,7 @@ DROP TABLESPACE :TABLESPACE_2; -- Make sure table qualified name is used in chunks_in function. 
Otherwise having a table name same as a column name might yield an error CREATE TABLE dist_device(time timestamptz, dist_device int, temp float); SELECT * FROM create_distributed_hypertable('dist_device', 'time'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+-------------+--------- @@ -3814,6 +3858,7 @@ SELECT * FROM dist_device; -- Test estimating relation size without stats CREATE TABLE hyper_estimate(time timestamptz, device int, temp float); SELECT * FROM create_distributed_hypertable('hyper_estimate', 'time', 'device', number_partitions => 3, replication_factor => 1, chunk_time_interval => INTERVAL '7 days'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+----------------+--------- @@ -3903,6 +3948,7 @@ CREATE TABLE hyper ( SELECT * FROM create_distributed_hypertable('hyper', 'time', 'device', 3, chunk_time_interval => interval '18 hours' ); +WARNING: distributed hypertable is deprecated WARNING: distributed hypertable "hyper" has a foreign key to a non-distributed table hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -3954,6 +4000,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4023,6 +4070,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4067,6 +4115,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4129,6 +4178,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4185,6 +4235,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4250,18 +4301,21 @@ CREATE TABLE disttable_with_relopts_2(time timestamptz NOT NULL, device int) WIT CREATE TABLE disttable_with_relopts_3(time timestamptz NOT NULL, device int); CREATE INDEX disttable_with_relopts_3_idx ON disttable_with_relopts_3(device) WITH (fillfactor=20); SELECT * FROM create_distributed_hypertable('disttable_with_relopts_1', 'time'); +WARNING: distributed hypertable is 
deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+--------------------------+--------- 18 | public | disttable_with_relopts_1 | t (1 row) SELECT * FROM create_distributed_hypertable('disttable_with_relopts_2', 'time'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+--------------------------+--------- 19 | public | disttable_with_relopts_2 | t (1 row) SELECT * FROM create_distributed_hypertable('disttable_with_relopts_3', 'time'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+--------------------------+--------- 20 | public | disttable_with_relopts_3 | t @@ -4326,6 +4380,7 @@ ORDER BY relname; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname NOTICE: [db_dist_hypertable_1]: @@ -4361,6 +4416,7 @@ disttable_with_relopts_1|{fillfactor=10} SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_2' ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_2' ORDER BY relname NOTICE: [db_dist_hypertable_1]: @@ -4397,6 +4453,7 @@ disttable_with_relopts_2|{fillfactor=10,parallel_workers=1} SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_3_idx' ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_3_idx' ORDER BY relname NOTICE: [db_dist_hypertable_1]: @@ -4435,6 +4492,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname IN (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) @@ -4477,6 +4535,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_2')) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname IN (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_2')) @@ -4540,6 +4599,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname IN 
(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) @@ -4601,6 +4661,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname IN (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) @@ -4645,6 +4706,7 @@ DROP TABLE disttable_with_relopts_3; -- CREATE TABLE disttable_serial(time timestamptz NOT NULL, device int, id1 SERIAL, id2 SMALLSERIAL, id3 BIGSERIAL); SELECT create_distributed_hypertable('disttable_serial', 'time', 'device'); +WARNING: distributed hypertable is deprecated create_distributed_hypertable -------------------------------- (21,public,disttable_serial,t) @@ -4664,6 +4726,7 @@ SELECT * FROM test.show_columns('disttable_serial'); SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM test.show_columns('disttable_serial'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM test.show_columns('disttable_serial') NOTICE: [db_dist_hypertable_1]: @@ -4726,6 +4789,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE FROM information_schema.columns WHERE table_name = 'disttable_serial'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT column_name, column_default FROM information_schema.columns @@ -4795,6 +4859,7 @@ SELECT currval('disttable_serial_id1_seq'::regclass), SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ SELECT currval('disttable_serial_id1_seq'::regclass); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT currval('disttable_serial_id1_seq'::regclass) ERROR: [db_dist_hypertable_1]: relation "disttable_serial_id1_seq" does not exist @@ -4814,6 +4879,7 @@ SELECT * from disttable_serial ORDER BY id1; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * from disttable_serial ORDER BY id1; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * from disttable_serial ORDER BY id1 NOTICE: [db_dist_hypertable_1]: @@ -4932,6 +4998,7 @@ CREATE TABLE test_1702 ( dummy71 int ); SELECT create_distributed_hypertable('test_1702', 'time', 'id'); +WARNING: distributed hypertable is deprecated WARNING: column type "character varying" used for "id" does not follow best practices WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_distributed_hypertable @@ -4990,6 +5057,7 @@ CREATE TABLE test_1702 ( dummy5 int ); SELECT create_distributed_hypertable('test_1702', 'time', 'id'); +WARNING: distributed hypertable is deprecated WARNING: column type "character varying" used for "id" does not follow best practices WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_distributed_hypertable @@ -5022,6 +5090,7 @@ CREATE TABLE whatever ( ); SELECT * FROM create_distributed_hypertable('whatever', 'timestamp', 'user_id', if_not_exists => true, chunk_time_interval => INTERVAL '1 day'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created 
---------------+-------------+------------+--------- 24 | public | whatever | t @@ -5037,6 +5106,7 @@ SELECT last_value FROM _timescaledb_catalog.hypertable_id_seq; SELECT * FROM create_distributed_hypertable('whatever', 'timestamp', 'user_id', if_not_exists => true, chunk_time_interval => INTERVAL '1 day'); +WARNING: distributed hypertable is deprecated NOTICE: table "whatever" is already a hypertable, skipping hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -5065,9 +5135,11 @@ INSERT INTO dist_hypertable_1 VALUES \set ON_ERROR_STOP 0 SELECT * FROM create_distributed_hypertable('dist_hypertable_1', 'time', 'device', 3, migrate_data => FALSE); +WARNING: distributed hypertable is deprecated ERROR: table "dist_hypertable_1" is not empty SELECT * FROM create_distributed_hypertable('dist_hypertable_1', 'time', 'device', 3, migrate_data => TRUE); +WARNING: distributed hypertable is deprecated ERROR: cannot migrate data for distributed hypertable \set ON_ERROR_STOP 1 -- Test creating index with transaction per chunk on a distributed hypertable @@ -5079,6 +5151,7 @@ CREATE TABLE disttable( value float ); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 25 | public | disttable | t @@ -5101,6 +5174,7 @@ ERROR: cannot use timescaledb.transaction_per_chunk with distributed hypertable -- CREATE TABLE dist_syscol(time timestamptz NOT NULL, color int, temp float); SELECT * FROM create_distributed_hypertable('dist_syscol', 'time', 'color'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+-------------+--------- 26 | public | dist_syscol | t @@ -5174,6 +5248,7 @@ CREATE TABLE disttable( temp_c float ); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 27 | public | disttable | t @@ -5405,6 +5480,7 @@ CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler NOTICE: [db_dist_hypertable_2]: @@ -5419,6 +5495,7 @@ CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler -- Create distributed hypertable using non-default access method CREATE TABLE disttable(time timestamptz NOT NULL, device int, temp_c float, temp_f float GENERATED ALWAYS AS (temp_c * 9 / 5 + 32) STORED) USING test_am; SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 28 | public | disttable | t @@ -5433,6 +5510,7 @@ FROM pg_class cl, pg_am am WHERE cl.oid = 'disttable'::regclass AND cl.relam = am.oid; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT amname AS hypertable_amname @@ -5601,6 +5679,7 @@ SELECT * FROM disttable ORDER BY 1; -- CREATE TABLE test (time timestamp, v int); SELECT 
create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -5616,6 +5695,7 @@ $$; CALL test_drop(); CREATE TABLE test (time timestamp, v int); SELECT create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -5646,6 +5726,7 @@ SELECT test.tsl_override_current_timestamptz(null); CREATE TABLE test_tz (time timestamp, v int); SELECT create_distributed_hypertable('test_tz','time', chunk_time_interval => interval '1 hour'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -5968,6 +6049,7 @@ DROP TABLE test_tz; CREATE TABLE test_now (time timestamp, v int); SELECT create_distributed_hypertable('test_now','time', chunk_time_interval => interval '1 hour'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -6038,6 +6120,7 @@ DEALLOCATE test_query; -- CREATE TABLE test (time timestamp, v int); SELECT create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -6061,6 +6144,7 @@ SELECT compress_chunk(show_chunks) FROM show_chunks('test'); DROP TABLE test; CREATE TABLE test (time timestamp, v int); SELECT create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -6094,6 +6178,7 @@ DROP TABLE test; -- CREATE TABLE test (time timestamp NOT NULL, my_column int NOT NULL); SELECT create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_distributed_hypertable ------------------------------- @@ -6116,6 +6201,7 @@ DROP TABLE test; -- Test insert into distributed hypertable with pruned chunks CREATE TABLE pruned_chunks_1(time TIMESTAMPTZ NOT NULL, sensor_id INTEGER, value FLOAT); SELECT table_name FROM create_distributed_hypertable('pruned_chunks_1', 'time', 'sensor_id'); +WARNING: distributed hypertable is deprecated table_name ----------------- pruned_chunks_1 @@ -6125,6 +6211,7 @@ INSERT INTO pruned_chunks_1 VALUES ('2020-12-09',1,32.2); CREATE TABLE pruned_chunks_2(time TIMESTAMPTZ NOT NULL, sensor_id INTEGER, value FLOAT); -- Convert the table to a distributed hypertable SELECT table_name FROM create_distributed_hypertable('pruned_chunks_2', 'time', 'sensor_id'); +WARNING: distributed hypertable is deprecated table_name ----------------- pruned_chunks_2 diff --git a/tsl/test/expected/dist_hypertable-14.out b/tsl/test/expected/dist_hypertable-14.out index 
ede4f2d53b3..a10d414f52b 100644 --- a/tsl/test/expected/dist_hypertable-14.out +++ b/tsl/test/expected/dist_hypertable-14.out @@ -21,6 +21,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------------+----------------------+--------------+------------------+------------------- db_dist_hypertable_1 | db_dist_hypertable_1 | t | t | t @@ -62,6 +65,7 @@ ERROR: operation not supported -- constraint to test how those work CREATE TABLE disttable(time timestamptz, device int CHECK (device > 0), color int, temp float, PRIMARY KEY (time,device)); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 1); +WARNING: distributed hypertable is deprecated WARNING: insufficient number of partitions for dimension "device" hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -136,6 +140,7 @@ ERROR: replication factor too large for hypertable "underreplicated" RESET ROLE; SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node(:'DATA_NODE_4', host => 'localhost', database => :'DATA_NODE_4'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------------+----------------------+--------------+------------------+------------------- db_dist_hypertable_4 | db_dist_hypertable_4 | t | t | t @@ -174,6 +179,7 @@ SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; (8 rows) SELECT attach_data_node(:'DATA_NODE_4', 'disttable', repartition => false); +WARNING: attaching data node is deprecated attach_data_node ---------------------------- (1,2,db_dist_hypertable_4) @@ -195,6 +201,7 @@ SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; --create new session to clear out connections \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; SELECT * FROM delete_data_node(:'DATA_NODE_4', force => true, drop_database => true, repartition => false); +WARNING: deleting data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "underreplicated" delete_data_node ------------------ @@ -520,6 +527,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable') @@ -561,6 +569,7 @@ chunk_id|hypertable_id|schema_name |table_name |relkind|slice SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM disttable; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM disttable NOTICE: [db_dist_hypertable_1]: @@ -1428,6 +1437,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable') @@ -1473,6 +1483,7 @@ 
chunk_id|hypertable_id|schema_name |table_name |relkind|slice SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM disttable; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM disttable NOTICE: [db_dist_hypertable_1]: @@ -1580,6 +1591,7 @@ ERROR: ON CONFLICT DO UPDATE not supported on distributed hypertables SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ INSERT INTO disttable VALUES ('2019-01-02 12:34', 1, 2, 9.3) $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: INSERT INTO disttable VALUES ('2019-01-02 12:34', 1, 2, 9.3) @@ -1589,6 +1601,7 @@ ERROR: [db_dist_hypertable_1]: distributed hypertable member cannot create chun SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ INSERT INTO disttable VALUES ('2017-09-03 06:09', 1, 2, 9.3) $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: INSERT INTO disttable VALUES ('2017-09-03 06:09', 1, 2, 9.3) @@ -1689,6 +1702,7 @@ SELECT * FROM disttable; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM disttable; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM disttable NOTICE: [db_dist_hypertable_1]: @@ -1762,6 +1776,7 @@ SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, SELECT * FROM show_chunks('disttable'); SELECT * FROM disttable; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk NOTICE: [db_dist_hypertable_1]: @@ -1882,6 +1897,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('underreplicated'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('underreplicated') @@ -1920,6 +1936,7 @@ chunk_id|hypertable_id|schema_name |table_name |relkind|slic SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM underreplicated; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM underreplicated NOTICE: [db_dist_hypertable_1]: @@ -1974,6 +1991,7 @@ SELECT * FROM underreplicated; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM underreplicated; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM underreplicated NOTICE: [db_dist_hypertable_1]: @@ -2020,6 +2038,7 @@ RETURNING *; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM underreplicated; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM underreplicated NOTICE: [db_dist_hypertable_1]: @@ -2057,6 +2076,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_3'], $$ CREATE TABLE remotetable(time timestamptz PRIMARY KEY, id int, cost float); SELECT * FROM underreplicated; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_3]: CREATE TABLE remotetable(time timestamptz PRIMARY KEY, id int, cost float) NOTICE: [db_dist_hypertable_3]: @@ -2080,6 +2100,7 @@ ERROR: [db_dist_hypertable_3]: 
relation "remotetable" already exists -- Test distributed_hypertable creation fails with replication factor 0 CREATE TABLE remotetable2(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float); SELECT * FROM create_distributed_hypertable('remotetable2', 'time', replication_factor => 0); +WARNING: distributed hypertable is deprecated ERROR: invalid replication factor \set ON_ERROR_STOP 1 SELECT * FROM timescaledb_information.hypertables @@ -2106,6 +2127,7 @@ SELECT * FROM create_distributed_hypertable('"Table\\Schema"."Param_Table"', 'ti associated_schema_name => 'T3sTSch', associated_table_prefix => 'test*pre_', chunk_time_interval => interval '1 week', create_default_indexes => FALSE, if_not_exists => TRUE, replication_factor => 2, data_nodes => ARRAY[:'DATA_NODE_2', :'DATA_NODE_3']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time Col %#^#@$#" hypertable_id | schema_name | table_name | created ---------------+---------------+-------------+--------- @@ -2114,6 +2136,7 @@ NOTICE: adding not-null constraint to column "time Col %#^#@$#" -- Test detach and attach data node SELECT * FROM detach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"', force => true, drop_remote_data => true); +WARNING: detaching data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "Param_Table" NOTICE: the number of partitions in dimension "__region" of hypertable "Param_Table" was decreased to 1 detach_data_node @@ -2147,6 +2170,7 @@ ORDER BY 1, 2; (1 row) SELECT * FROM attach_data_node(:'DATA_NODE_1', '"Table\\Schema"."Param_Table"'); +WARNING: attaching data node is deprecated NOTICE: the number of partitions in dimension "__region" was increased to 2 hypertable_id | node_hypertable_id | node_name ---------------+--------------------+---------------------- @@ -2179,6 +2203,7 @@ ORDER BY 1, 2; -- Attach another data node but do not auto-repartition, i.e., -- increase the number of slices. 
SELECT * FROM attach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"', repartition => false); +WARNING: attaching data node is deprecated WARNING: insufficient number of partitions for dimension "__region" hypertable_id | node_hypertable_id | node_name ---------------+--------------------+---------------------- @@ -2235,6 +2260,7 @@ SELECT * FROM _timescaledb_catalog.dimension; SELECT t.tgname, t.tgtype, t.tgfoid::regproc FROM pg_trigger t, pg_class c WHERE c.relname = 'Param_Table' AND t.tgrelid = c.oid; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM _timescaledb_catalog.hypertable NOTICE: [db_dist_hypertable_1]: @@ -2344,6 +2370,7 @@ ts_insert_blocker| 7|_timescaledb_functions.insert_blocker -- Verify that repartitioning works as expected on detach_data_node SELECT * FROM detach_data_node(:'DATA_NODE_1', '"Table\\Schema"."Param_Table"', repartition => true); +WARNING: detaching data node is deprecated NOTICE: the number of partitions in dimension "__region" of hypertable "Param_Table" was decreased to 2 detach_data_node ------------------ @@ -2361,6 +2388,7 @@ AND h.table_name = 'Param_Table'; (2 rows) SELECT * FROM detach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"', force => true, repartition => false); +WARNING: detaching data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "Param_Table" detach_data_node ------------------ @@ -2381,6 +2409,7 @@ AND h.table_name = 'Param_Table'; -- should be propagated to backends. CREATE TABLE dimented_table (time timestamptz, column1 int, column2 timestamptz, column3 int); SELECT * FROM create_distributed_hypertable('dimented_table', 'time', partitioning_column => 'column1', number_partitions => 4, replication_factor => 1, data_nodes => ARRAY[:'DATA_NODE_1']); +WARNING: distributed hypertable is deprecated WARNING: only one data node was assigned to the hypertable NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created @@ -2459,6 +2488,7 @@ SELECT * FROM _timescaledb_catalog.dimension; (9 rows) SELECT * FROM attach_data_node(:'DATA_NODE_2', 'dimented_table'); +WARNING: attaching data node is deprecated hypertable_id | node_hypertable_id | node_name ---------------+--------------------+---------------------- 5 | 5 | db_dist_hypertable_2 @@ -2482,6 +2512,7 @@ SELECT * FROM _timescaledb_catalog.dimension; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM _timescaledb_catalog.dimension; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM _timescaledb_catalog.dimension NOTICE: [db_dist_hypertable_1]: @@ -3093,6 +3124,7 @@ SELECT * FROM twodim ORDER BY time; SELECT count(*) FROM twodim; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM twodim ORDER BY time @@ -3240,6 +3272,7 @@ SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct NOTICE: [db_dist_hypertable_1]: @@ -3277,6 +3310,7 @@ CREATE TABLE "disttable'quote"(time timestamptz, "device'quote" int, val float, SELECT public.create_distributed_hypertable( 
'disttable''quote', 'time', 'device''quote', data_nodes => ARRAY[:'DATA_NODE_1'] ); +WARNING: distributed hypertable is deprecated WARNING: only one data node was assigned to the hypertable NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -3287,6 +3321,7 @@ NOTICE: adding not-null constraint to column "time" SET SCHEMA 'public'; CREATE TABLE disttable_drop_chunks(time timestamptz, device int CHECK (device > 0), color int, PRIMARY KEY (time,device)); SELECT * FROM create_distributed_hypertable('disttable_drop_chunks', 'time', 'device', number_partitions => 3, replication_factor => 2); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+-----------------------+--------- 10 | public | disttable_drop_chunks | t @@ -3319,6 +3354,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable_drop_chunks'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable_drop_chunks') @@ -3393,6 +3429,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable_drop_chunks'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable_drop_chunks') @@ -3447,6 +3484,7 @@ SELECT * FROM disttable_drop_chunks; CREATE TABLE "weird nAme\\#^."(time bigint, device int CHECK (device > 0), color int, PRIMARY KEY (time,device)); SELECT * FROM create_distributed_hypertable('"weird nAme\\#^."', 'time', 'device', 3, chunk_time_interval => 100, replication_factor => 2); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+-----------------+--------- 11 | public | weird nAme\\#^. 
| t @@ -3483,6 +3521,7 @@ SELECT * FROM "weird nAme\\#^."; DROP TABLE disttable CASCADE; CREATE TABLE disttable (time bigint, device int, temp float); SELECT create_distributed_hypertable('disttable', 'time', chunk_time_interval => 1000000::bigint); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -3494,6 +3533,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' @@ -3544,6 +3584,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' @@ -3610,6 +3651,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' @@ -3669,6 +3711,7 @@ SELECT detach_tablespaces('disttable'); -- Continue to use previously attached tablespace, but block attach/detach CREATE TABLE disttable2(time timestamptz, device int, temp float) TABLESPACE :TABLESPACE_1; SELECT create_distributed_hypertable('disttable2', 'time', chunk_time_interval => 1000000::bigint); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -3771,6 +3814,7 @@ DROP TABLESPACE :TABLESPACE_2; -- Make sure table qualified name is used in chunks_in function. 
Otherwise having a table name same as a column name might yield an error CREATE TABLE dist_device(time timestamptz, dist_device int, temp float); SELECT * FROM create_distributed_hypertable('dist_device', 'time'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+-------------+--------- @@ -3821,6 +3865,7 @@ SELECT * FROM dist_device; -- Test estimating relation size without stats CREATE TABLE hyper_estimate(time timestamptz, device int, temp float); SELECT * FROM create_distributed_hypertable('hyper_estimate', 'time', 'device', number_partitions => 3, replication_factor => 1, chunk_time_interval => INTERVAL '7 days'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+----------------+--------- @@ -3910,6 +3955,7 @@ CREATE TABLE hyper ( SELECT * FROM create_distributed_hypertable('hyper', 'time', 'device', 3, chunk_time_interval => interval '18 hours' ); +WARNING: distributed hypertable is deprecated WARNING: distributed hypertable "hyper" has a foreign key to a non-distributed table hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -3961,6 +4007,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4030,6 +4077,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4074,6 +4122,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4136,6 +4185,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4192,6 +4242,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4257,18 +4308,21 @@ CREATE TABLE disttable_with_relopts_2(time timestamptz NOT NULL, device int) WIT CREATE TABLE disttable_with_relopts_3(time timestamptz NOT NULL, device int); CREATE INDEX disttable_with_relopts_3_idx ON disttable_with_relopts_3(device) WITH (fillfactor=20); SELECT * FROM create_distributed_hypertable('disttable_with_relopts_1', 'time'); +WARNING: distributed hypertable is 
deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+--------------------------+--------- 18 | public | disttable_with_relopts_1 | t (1 row) SELECT * FROM create_distributed_hypertable('disttable_with_relopts_2', 'time'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+--------------------------+--------- 19 | public | disttable_with_relopts_2 | t (1 row) SELECT * FROM create_distributed_hypertable('disttable_with_relopts_3', 'time'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+--------------------------+--------- 20 | public | disttable_with_relopts_3 | t @@ -4333,6 +4387,7 @@ ORDER BY relname; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname NOTICE: [db_dist_hypertable_1]: @@ -4368,6 +4423,7 @@ disttable_with_relopts_1|{fillfactor=10} SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_2' ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_2' ORDER BY relname NOTICE: [db_dist_hypertable_1]: @@ -4404,6 +4460,7 @@ disttable_with_relopts_2|{fillfactor=10,parallel_workers=1} SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_3_idx' ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_3_idx' ORDER BY relname NOTICE: [db_dist_hypertable_1]: @@ -4442,6 +4499,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname IN (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) @@ -4484,6 +4542,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_2')) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname IN (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_2')) @@ -4547,6 +4606,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname IN 
(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) @@ -4608,6 +4668,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname IN (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) @@ -4652,6 +4713,7 @@ DROP TABLE disttable_with_relopts_3; -- CREATE TABLE disttable_serial(time timestamptz NOT NULL, device int, id1 SERIAL, id2 SMALLSERIAL, id3 BIGSERIAL); SELECT create_distributed_hypertable('disttable_serial', 'time', 'device'); +WARNING: distributed hypertable is deprecated create_distributed_hypertable -------------------------------- (21,public,disttable_serial,t) @@ -4671,6 +4733,7 @@ SELECT * FROM test.show_columns('disttable_serial'); SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM test.show_columns('disttable_serial'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM test.show_columns('disttable_serial') NOTICE: [db_dist_hypertable_1]: @@ -4733,6 +4796,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE FROM information_schema.columns WHERE table_name = 'disttable_serial'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT column_name, column_default FROM information_schema.columns @@ -4802,6 +4866,7 @@ SELECT currval('disttable_serial_id1_seq'::regclass), SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ SELECT currval('disttable_serial_id1_seq'::regclass); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT currval('disttable_serial_id1_seq'::regclass) ERROR: [db_dist_hypertable_1]: relation "disttable_serial_id1_seq" does not exist @@ -4821,6 +4886,7 @@ SELECT * from disttable_serial ORDER BY id1; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * from disttable_serial ORDER BY id1; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * from disttable_serial ORDER BY id1 NOTICE: [db_dist_hypertable_1]: @@ -4939,6 +5005,7 @@ CREATE TABLE test_1702 ( dummy71 int ); SELECT create_distributed_hypertable('test_1702', 'time', 'id'); +WARNING: distributed hypertable is deprecated WARNING: column type "character varying" used for "id" does not follow best practices WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_distributed_hypertable @@ -4997,6 +5064,7 @@ CREATE TABLE test_1702 ( dummy5 int ); SELECT create_distributed_hypertable('test_1702', 'time', 'id'); +WARNING: distributed hypertable is deprecated WARNING: column type "character varying" used for "id" does not follow best practices WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_distributed_hypertable @@ -5029,6 +5097,7 @@ CREATE TABLE whatever ( ); SELECT * FROM create_distributed_hypertable('whatever', 'timestamp', 'user_id', if_not_exists => true, chunk_time_interval => INTERVAL '1 day'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created 
---------------+-------------+------------+--------- 24 | public | whatever | t @@ -5044,6 +5113,7 @@ SELECT last_value FROM _timescaledb_catalog.hypertable_id_seq; SELECT * FROM create_distributed_hypertable('whatever', 'timestamp', 'user_id', if_not_exists => true, chunk_time_interval => INTERVAL '1 day'); +WARNING: distributed hypertable is deprecated NOTICE: table "whatever" is already a hypertable, skipping hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -5072,9 +5142,11 @@ INSERT INTO dist_hypertable_1 VALUES \set ON_ERROR_STOP 0 SELECT * FROM create_distributed_hypertable('dist_hypertable_1', 'time', 'device', 3, migrate_data => FALSE); +WARNING: distributed hypertable is deprecated ERROR: table "dist_hypertable_1" is not empty SELECT * FROM create_distributed_hypertable('dist_hypertable_1', 'time', 'device', 3, migrate_data => TRUE); +WARNING: distributed hypertable is deprecated ERROR: cannot migrate data for distributed hypertable \set ON_ERROR_STOP 1 -- Test creating index with transaction per chunk on a distributed hypertable @@ -5086,6 +5158,7 @@ CREATE TABLE disttable( value float ); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 25 | public | disttable | t @@ -5108,6 +5181,7 @@ ERROR: cannot use timescaledb.transaction_per_chunk with distributed hypertable -- CREATE TABLE dist_syscol(time timestamptz NOT NULL, color int, temp float); SELECT * FROM create_distributed_hypertable('dist_syscol', 'time', 'color'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+-------------+--------- 26 | public | dist_syscol | t @@ -5181,6 +5255,7 @@ CREATE TABLE disttable( temp_c float ); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 27 | public | disttable | t @@ -5450,6 +5525,7 @@ CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler NOTICE: [db_dist_hypertable_2]: @@ -5464,6 +5540,7 @@ CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler -- Create distributed hypertable using non-default access method CREATE TABLE disttable(time timestamptz NOT NULL, device int, temp_c float, temp_f float GENERATED ALWAYS AS (temp_c * 9 / 5 + 32) STORED) USING test_am; SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 28 | public | disttable | t @@ -5478,6 +5555,7 @@ FROM pg_class cl, pg_am am WHERE cl.oid = 'disttable'::regclass AND cl.relam = am.oid; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT amname AS hypertable_amname @@ -5646,6 +5724,7 @@ SELECT * FROM disttable ORDER BY 1; -- CREATE TABLE test (time timestamp, v int); SELECT 
create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -5661,6 +5740,7 @@ $$; CALL test_drop(); CREATE TABLE test (time timestamp, v int); SELECT create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -5691,6 +5771,7 @@ SELECT test.tsl_override_current_timestamptz(null); CREATE TABLE test_tz (time timestamp, v int); SELECT create_distributed_hypertable('test_tz','time', chunk_time_interval => interval '1 hour'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -6013,6 +6094,7 @@ DROP TABLE test_tz; CREATE TABLE test_now (time timestamp, v int); SELECT create_distributed_hypertable('test_now','time', chunk_time_interval => interval '1 hour'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -6083,6 +6165,7 @@ DEALLOCATE test_query; -- CREATE TABLE test (time timestamp, v int); SELECT create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -6106,6 +6189,7 @@ SELECT compress_chunk(show_chunks) FROM show_chunks('test'); DROP TABLE test; CREATE TABLE test (time timestamp, v int); SELECT create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -6139,6 +6223,7 @@ DROP TABLE test; -- CREATE TABLE test (time timestamp NOT NULL, my_column int NOT NULL); SELECT create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_distributed_hypertable ------------------------------- @@ -6161,6 +6246,7 @@ DROP TABLE test; -- Test insert into distributed hypertable with pruned chunks CREATE TABLE pruned_chunks_1(time TIMESTAMPTZ NOT NULL, sensor_id INTEGER, value FLOAT); SELECT table_name FROM create_distributed_hypertable('pruned_chunks_1', 'time', 'sensor_id'); +WARNING: distributed hypertable is deprecated table_name ----------------- pruned_chunks_1 @@ -6170,6 +6256,7 @@ INSERT INTO pruned_chunks_1 VALUES ('2020-12-09',1,32.2); CREATE TABLE pruned_chunks_2(time TIMESTAMPTZ NOT NULL, sensor_id INTEGER, value FLOAT); -- Convert the table to a distributed hypertable SELECT table_name FROM create_distributed_hypertable('pruned_chunks_2', 'time', 'sensor_id'); +WARNING: distributed hypertable is deprecated table_name ----------------- pruned_chunks_2 diff --git a/tsl/test/expected/dist_hypertable-15.out b/tsl/test/expected/dist_hypertable-15.out index 
23f808ff7ba..87096ac7df6 100644 --- a/tsl/test/expected/dist_hypertable-15.out +++ b/tsl/test/expected/dist_hypertable-15.out @@ -21,6 +21,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------------+----------------------+--------------+------------------+------------------- db_dist_hypertable_1 | db_dist_hypertable_1 | t | t | t @@ -62,6 +65,7 @@ ERROR: operation not supported -- constraint to test how those work CREATE TABLE disttable(time timestamptz, device int CHECK (device > 0), color int, temp float, PRIMARY KEY (time,device)); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 1); +WARNING: distributed hypertable is deprecated WARNING: insufficient number of partitions for dimension "device" hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -136,6 +140,7 @@ ERROR: replication factor too large for hypertable "underreplicated" RESET ROLE; SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node(:'DATA_NODE_4', host => 'localhost', database => :'DATA_NODE_4'); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------------+----------------------+--------------+------------------+------------------- db_dist_hypertable_4 | db_dist_hypertable_4 | t | t | t @@ -174,6 +179,7 @@ SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; (8 rows) SELECT attach_data_node(:'DATA_NODE_4', 'disttable', repartition => false); +WARNING: attaching data node is deprecated attach_data_node ---------------------------- (1,2,db_dist_hypertable_4) @@ -195,6 +201,7 @@ SELECT * FROM hypertable_partitions WHERE table_name = 'disttable'; --create new session to clear out connections \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; SELECT * FROM delete_data_node(:'DATA_NODE_4', force => true, drop_database => true, repartition => false); +WARNING: deleting data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "underreplicated" delete_data_node ------------------ @@ -520,6 +527,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable') @@ -561,6 +569,7 @@ chunk_id|hypertable_id|schema_name |table_name |relkind|slice SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM disttable; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM disttable NOTICE: [db_dist_hypertable_1]: @@ -1434,6 +1443,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable') @@ -1479,6 +1489,7 @@ 
chunk_id|hypertable_id|schema_name |table_name |relkind|slice SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM disttable; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM disttable NOTICE: [db_dist_hypertable_1]: @@ -1586,6 +1597,7 @@ ERROR: ON CONFLICT DO UPDATE not supported on distributed hypertables SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ INSERT INTO disttable VALUES ('2019-01-02 12:34', 1, 2, 9.3) $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: INSERT INTO disttable VALUES ('2019-01-02 12:34', 1, 2, 9.3) @@ -1595,6 +1607,7 @@ ERROR: [db_dist_hypertable_1]: distributed hypertable member cannot create chun SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ INSERT INTO disttable VALUES ('2017-09-03 06:09', 1, 2, 9.3) $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: INSERT INTO disttable VALUES ('2017-09-03 06:09', 1, 2, 9.3) @@ -1695,6 +1708,7 @@ SELECT * FROM disttable; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM disttable; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM disttable NOTICE: [db_dist_hypertable_1]: @@ -1768,6 +1782,7 @@ SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, SELECT * FROM show_chunks('disttable'); SELECT * FROM disttable; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk NOTICE: [db_dist_hypertable_1]: @@ -1888,6 +1903,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('underreplicated'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('underreplicated') @@ -1926,6 +1942,7 @@ chunk_id|hypertable_id|schema_name |table_name |relkind|slic SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM underreplicated; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM underreplicated NOTICE: [db_dist_hypertable_1]: @@ -1980,6 +1997,7 @@ SELECT * FROM underreplicated; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM underreplicated; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM underreplicated NOTICE: [db_dist_hypertable_1]: @@ -2026,6 +2044,7 @@ RETURNING *; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM underreplicated; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM underreplicated NOTICE: [db_dist_hypertable_1]: @@ -2063,6 +2082,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_3'], $$ CREATE TABLE remotetable(time timestamptz PRIMARY KEY, id int, cost float); SELECT * FROM underreplicated; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_3]: CREATE TABLE remotetable(time timestamptz PRIMARY KEY, id int, cost float) NOTICE: [db_dist_hypertable_3]: @@ -2086,6 +2106,7 @@ ERROR: [db_dist_hypertable_3]: 
relation "remotetable" already exists -- Test distributed_hypertable creation fails with replication factor 0 CREATE TABLE remotetable2(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float); SELECT * FROM create_distributed_hypertable('remotetable2', 'time', replication_factor => 0); +WARNING: distributed hypertable is deprecated ERROR: invalid replication factor \set ON_ERROR_STOP 1 SELECT * FROM timescaledb_information.hypertables @@ -2112,6 +2133,7 @@ SELECT * FROM create_distributed_hypertable('"Table\\Schema"."Param_Table"', 'ti associated_schema_name => 'T3sTSch', associated_table_prefix => 'test*pre_', chunk_time_interval => interval '1 week', create_default_indexes => FALSE, if_not_exists => TRUE, replication_factor => 2, data_nodes => ARRAY[:'DATA_NODE_2', :'DATA_NODE_3']); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time Col %#^#@$#" hypertable_id | schema_name | table_name | created ---------------+---------------+-------------+--------- @@ -2120,6 +2142,7 @@ NOTICE: adding not-null constraint to column "time Col %#^#@$#" -- Test detach and attach data node SELECT * FROM detach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"', force => true, drop_remote_data => true); +WARNING: detaching data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "Param_Table" NOTICE: the number of partitions in dimension "__region" of hypertable "Param_Table" was decreased to 1 detach_data_node @@ -2153,6 +2176,7 @@ ORDER BY 1, 2; (1 row) SELECT * FROM attach_data_node(:'DATA_NODE_1', '"Table\\Schema"."Param_Table"'); +WARNING: attaching data node is deprecated NOTICE: the number of partitions in dimension "__region" was increased to 2 hypertable_id | node_hypertable_id | node_name ---------------+--------------------+---------------------- @@ -2185,6 +2209,7 @@ ORDER BY 1, 2; -- Attach another data node but do not auto-repartition, i.e., -- increase the number of slices. 
SELECT * FROM attach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"', repartition => false); +WARNING: attaching data node is deprecated WARNING: insufficient number of partitions for dimension "__region" hypertable_id | node_hypertable_id | node_name ---------------+--------------------+---------------------- @@ -2241,6 +2266,7 @@ SELECT * FROM _timescaledb_catalog.dimension; SELECT t.tgname, t.tgtype, t.tgfoid::regproc FROM pg_trigger t, pg_class c WHERE c.relname = 'Param_Table' AND t.tgrelid = c.oid; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM _timescaledb_catalog.hypertable NOTICE: [db_dist_hypertable_1]: @@ -2350,6 +2376,7 @@ ts_insert_blocker| 7|_timescaledb_functions.insert_blocker -- Verify that repartitioning works as expected on detach_data_node SELECT * FROM detach_data_node(:'DATA_NODE_1', '"Table\\Schema"."Param_Table"', repartition => true); +WARNING: detaching data node is deprecated NOTICE: the number of partitions in dimension "__region" of hypertable "Param_Table" was decreased to 2 detach_data_node ------------------ @@ -2367,6 +2394,7 @@ AND h.table_name = 'Param_Table'; (2 rows) SELECT * FROM detach_data_node(:'DATA_NODE_2', '"Table\\Schema"."Param_Table"', force => true, repartition => false); +WARNING: detaching data node is deprecated WARNING: insufficient number of data nodes for distributed hypertable "Param_Table" detach_data_node ------------------ @@ -2387,6 +2415,7 @@ AND h.table_name = 'Param_Table'; -- should be propagated to backends. CREATE TABLE dimented_table (time timestamptz, column1 int, column2 timestamptz, column3 int); SELECT * FROM create_distributed_hypertable('dimented_table', 'time', partitioning_column => 'column1', number_partitions => 4, replication_factor => 1, data_nodes => ARRAY[:'DATA_NODE_1']); +WARNING: distributed hypertable is deprecated WARNING: only one data node was assigned to the hypertable NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created @@ -2465,6 +2494,7 @@ SELECT * FROM _timescaledb_catalog.dimension; (9 rows) SELECT * FROM attach_data_node(:'DATA_NODE_2', 'dimented_table'); +WARNING: attaching data node is deprecated hypertable_id | node_hypertable_id | node_name ---------------+--------------------+---------------------- 5 | 5 | db_dist_hypertable_2 @@ -2488,6 +2518,7 @@ SELECT * FROM _timescaledb_catalog.dimension; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM _timescaledb_catalog.dimension; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM _timescaledb_catalog.dimension NOTICE: [db_dist_hypertable_1]: @@ -3099,6 +3130,7 @@ SELECT * FROM twodim ORDER BY time; SELECT count(*) FROM twodim; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM twodim ORDER BY time @@ -3246,6 +3278,7 @@ SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT time, txn_id, val, substring(info for 20) FROM disttable_with_ct NOTICE: [db_dist_hypertable_1]: @@ -3283,6 +3316,7 @@ CREATE TABLE "disttable'quote"(time timestamptz, "device'quote" int, val float, SELECT public.create_distributed_hypertable( 
'disttable''quote', 'time', 'device''quote', data_nodes => ARRAY[:'DATA_NODE_1'] ); +WARNING: distributed hypertable is deprecated WARNING: only one data node was assigned to the hypertable NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -3293,6 +3327,7 @@ NOTICE: adding not-null constraint to column "time" SET SCHEMA 'public'; CREATE TABLE disttable_drop_chunks(time timestamptz, device int CHECK (device > 0), color int, PRIMARY KEY (time,device)); SELECT * FROM create_distributed_hypertable('disttable_drop_chunks', 'time', 'device', number_partitions => 3, replication_factor => 2); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+-----------------------+--------- 10 | public | disttable_drop_chunks | t @@ -3325,6 +3360,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable_drop_chunks'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable_drop_chunks') @@ -3399,6 +3435,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable_drop_chunks'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('disttable_drop_chunks') @@ -3453,6 +3490,7 @@ SELECT * FROM disttable_drop_chunks; CREATE TABLE "weird nAme\\#^."(time bigint, device int CHECK (device > 0), color int, PRIMARY KEY (time,device)); SELECT * FROM create_distributed_hypertable('"weird nAme\\#^."', 'time', 'device', 3, chunk_time_interval => 100, replication_factor => 2); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+-----------------+--------- 11 | public | weird nAme\\#^. 
| t @@ -3489,6 +3527,7 @@ SELECT * FROM "weird nAme\\#^."; DROP TABLE disttable CASCADE; CREATE TABLE disttable (time bigint, device int, temp float); SELECT create_distributed_hypertable('disttable', 'time', chunk_time_interval => 1000000::bigint); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -3500,6 +3539,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' @@ -3550,6 +3590,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' @@ -3616,6 +3657,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' @@ -3675,6 +3717,7 @@ SELECT detach_tablespaces('disttable'); -- Continue to use previously attached tablespace, but block attach/detach CREATE TABLE disttable2(time timestamptz, device int, temp float) TABLESPACE :TABLESPACE_1; SELECT create_distributed_hypertable('disttable2', 'time', chunk_time_interval => 1000000::bigint); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" create_distributed_hypertable ------------------------------- @@ -3777,6 +3820,7 @@ DROP TABLESPACE :TABLESPACE_2; -- Make sure table qualified name is used in chunks_in function. 
Otherwise having a table name same as a column name might yield an error CREATE TABLE dist_device(time timestamptz, dist_device int, temp float); SELECT * FROM create_distributed_hypertable('dist_device', 'time'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+-------------+--------- @@ -3827,6 +3871,7 @@ SELECT * FROM dist_device; -- Test estimating relation size without stats CREATE TABLE hyper_estimate(time timestamptz, device int, temp float); SELECT * FROM create_distributed_hypertable('hyper_estimate', 'time', 'device', number_partitions => 3, replication_factor => 1, chunk_time_interval => INTERVAL '7 days'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+----------------+--------- @@ -3916,6 +3961,7 @@ CREATE TABLE hyper ( SELECT * FROM create_distributed_hypertable('hyper', 'time', 'device', 3, chunk_time_interval => interval '18 hours' ); +WARNING: distributed hypertable is deprecated WARNING: distributed hypertable "hyper" has a foreign key to a non-distributed table hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -3967,6 +4013,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4036,6 +4083,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4080,6 +4128,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4142,6 +4191,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4198,6 +4248,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT (_timescaledb_functions.show_chunk(show_chunks)).* FROM show_chunks('hyper') @@ -4263,18 +4314,21 @@ CREATE TABLE disttable_with_relopts_2(time timestamptz NOT NULL, device int) WIT CREATE TABLE disttable_with_relopts_3(time timestamptz NOT NULL, device int); CREATE INDEX disttable_with_relopts_3_idx ON disttable_with_relopts_3(device) WITH (fillfactor=20); SELECT * FROM create_distributed_hypertable('disttable_with_relopts_1', 'time'); +WARNING: distributed hypertable is 
deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+--------------------------+--------- 18 | public | disttable_with_relopts_1 | t (1 row) SELECT * FROM create_distributed_hypertable('disttable_with_relopts_2', 'time'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+--------------------------+--------- 19 | public | disttable_with_relopts_2 | t (1 row) SELECT * FROM create_distributed_hypertable('disttable_with_relopts_3', 'time'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+--------------------------+--------- 20 | public | disttable_with_relopts_3 | t @@ -4339,6 +4393,7 @@ ORDER BY relname; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_1' ORDER BY relname NOTICE: [db_dist_hypertable_1]: @@ -4374,6 +4429,7 @@ disttable_with_relopts_1|{fillfactor=10} SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_2' ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_2' ORDER BY relname NOTICE: [db_dist_hypertable_1]: @@ -4410,6 +4466,7 @@ disttable_with_relopts_2|{fillfactor=10,parallel_workers=1} SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_3_idx' ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname = 'disttable_with_relopts_3_idx' ORDER BY relname NOTICE: [db_dist_hypertable_1]: @@ -4448,6 +4505,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname IN (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) @@ -4490,6 +4548,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_2')) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname IN (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_2')) @@ -4553,6 +4612,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname IN 
(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) @@ -4614,6 +4674,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) ORDER BY relname; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT relname, reloptions FROM pg_class WHERE relname IN (SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name FROM show_chunks('disttable_with_relopts_1')) @@ -4658,6 +4719,7 @@ DROP TABLE disttable_with_relopts_3; -- CREATE TABLE disttable_serial(time timestamptz NOT NULL, device int, id1 SERIAL, id2 SMALLSERIAL, id3 BIGSERIAL); SELECT create_distributed_hypertable('disttable_serial', 'time', 'device'); +WARNING: distributed hypertable is deprecated create_distributed_hypertable -------------------------------- (21,public,disttable_serial,t) @@ -4677,6 +4739,7 @@ SELECT * FROM test.show_columns('disttable_serial'); SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * FROM test.show_columns('disttable_serial'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * FROM test.show_columns('disttable_serial') NOTICE: [db_dist_hypertable_1]: @@ -4739,6 +4802,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE FROM information_schema.columns WHERE table_name = 'disttable_serial'; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT column_name, column_default FROM information_schema.columns @@ -4808,6 +4872,7 @@ SELECT currval('disttable_serial_id1_seq'::regclass), SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ SELECT currval('disttable_serial_id1_seq'::regclass); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT currval('disttable_serial_id1_seq'::regclass) ERROR: [db_dist_hypertable_1]: relation "disttable_serial_id1_seq" does not exist @@ -4827,6 +4892,7 @@ SELECT * from disttable_serial ORDER BY id1; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ SELECT * from disttable_serial ORDER BY id1; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT * from disttable_serial ORDER BY id1 NOTICE: [db_dist_hypertable_1]: @@ -4945,6 +5011,7 @@ CREATE TABLE test_1702 ( dummy71 int ); SELECT create_distributed_hypertable('test_1702', 'time', 'id'); +WARNING: distributed hypertable is deprecated WARNING: column type "character varying" used for "id" does not follow best practices WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_distributed_hypertable @@ -5003,6 +5070,7 @@ CREATE TABLE test_1702 ( dummy5 int ); SELECT create_distributed_hypertable('test_1702', 'time', 'id'); +WARNING: distributed hypertable is deprecated WARNING: column type "character varying" used for "id" does not follow best practices WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_distributed_hypertable @@ -5035,6 +5103,7 @@ CREATE TABLE whatever ( ); SELECT * FROM create_distributed_hypertable('whatever', 'timestamp', 'user_id', if_not_exists => true, chunk_time_interval => INTERVAL '1 day'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created 
---------------+-------------+------------+--------- 24 | public | whatever | t @@ -5050,6 +5119,7 @@ SELECT last_value FROM _timescaledb_catalog.hypertable_id_seq; SELECT * FROM create_distributed_hypertable('whatever', 'timestamp', 'user_id', if_not_exists => true, chunk_time_interval => INTERVAL '1 day'); +WARNING: distributed hypertable is deprecated NOTICE: table "whatever" is already a hypertable, skipping hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- @@ -5078,9 +5148,11 @@ INSERT INTO dist_hypertable_1 VALUES \set ON_ERROR_STOP 0 SELECT * FROM create_distributed_hypertable('dist_hypertable_1', 'time', 'device', 3, migrate_data => FALSE); +WARNING: distributed hypertable is deprecated ERROR: table "dist_hypertable_1" is not empty SELECT * FROM create_distributed_hypertable('dist_hypertable_1', 'time', 'device', 3, migrate_data => TRUE); +WARNING: distributed hypertable is deprecated ERROR: cannot migrate data for distributed hypertable \set ON_ERROR_STOP 1 -- Test creating index with transaction per chunk on a distributed hypertable @@ -5092,6 +5164,7 @@ CREATE TABLE disttable( value float ); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 25 | public | disttable | t @@ -5114,6 +5187,7 @@ ERROR: cannot use timescaledb.transaction_per_chunk with distributed hypertable -- CREATE TABLE dist_syscol(time timestamptz NOT NULL, color int, temp float); SELECT * FROM create_distributed_hypertable('dist_syscol', 'time', 'color'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+-------------+--------- 26 | public | dist_syscol | t @@ -5187,6 +5261,7 @@ CREATE TABLE disttable( temp_c float ); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 27 | public | disttable | t @@ -5462,6 +5537,7 @@ CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler; SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE_3'], $$ CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler NOTICE: [db_dist_hypertable_2]: @@ -5476,6 +5552,7 @@ CREATE ACCESS METHOD test_am TYPE TABLE HANDLER heap_tableam_handler -- Create distributed hypertable using non-default access method CREATE TABLE disttable(time timestamptz NOT NULL, device int, temp_c float, temp_f float GENERATED ALWAYS AS (temp_c * 9 / 5 + 32) STORED) USING test_am; SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 28 | public | disttable | t @@ -5490,6 +5567,7 @@ FROM pg_class cl, pg_am am WHERE cl.oid = 'disttable'::regclass AND cl.relam = am.oid; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_hypertable_1]: SELECT amname AS hypertable_amname @@ -5658,6 +5736,7 @@ SELECT * FROM disttable ORDER BY 1; -- CREATE TABLE test (time timestamp, v int); SELECT 
create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -5673,6 +5752,7 @@ $$; CALL test_drop(); CREATE TABLE test (time timestamp, v int); SELECT create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -5703,6 +5783,7 @@ SELECT test.tsl_override_current_timestamptz(null); CREATE TABLE test_tz (time timestamp, v int); SELECT create_distributed_hypertable('test_tz','time', chunk_time_interval => interval '1 hour'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -6029,6 +6110,7 @@ DROP TABLE test_tz; CREATE TABLE test_now (time timestamp, v int); SELECT create_distributed_hypertable('test_now','time', chunk_time_interval => interval '1 hour'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -6099,6 +6181,7 @@ DEALLOCATE test_query; -- CREATE TABLE test (time timestamp, v int); SELECT create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -6122,6 +6205,7 @@ SELECT compress_chunk(show_chunks) FROM show_chunks('test'); DROP TABLE test; CREATE TABLE test (time timestamp, v int); SELECT create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices NOTICE: adding not-null constraint to column "time" create_distributed_hypertable @@ -6155,6 +6239,7 @@ DROP TABLE test; -- CREATE TABLE test (time timestamp NOT NULL, my_column int NOT NULL); SELECT create_distributed_hypertable('test','time'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_distributed_hypertable ------------------------------- @@ -6177,6 +6262,7 @@ DROP TABLE test; -- Test insert into distributed hypertable with pruned chunks CREATE TABLE pruned_chunks_1(time TIMESTAMPTZ NOT NULL, sensor_id INTEGER, value FLOAT); SELECT table_name FROM create_distributed_hypertable('pruned_chunks_1', 'time', 'sensor_id'); +WARNING: distributed hypertable is deprecated table_name ----------------- pruned_chunks_1 @@ -6186,6 +6272,7 @@ INSERT INTO pruned_chunks_1 VALUES ('2020-12-09',1,32.2); CREATE TABLE pruned_chunks_2(time TIMESTAMPTZ NOT NULL, sensor_id INTEGER, value FLOAT); -- Convert the table to a distributed hypertable SELECT table_name FROM create_distributed_hypertable('pruned_chunks_2', 'time', 'sensor_id'); +WARNING: distributed hypertable is deprecated table_name ----------------- pruned_chunks_2 diff --git a/tsl/test/expected/dist_move_chunk.out b/tsl/test/expected/dist_move_chunk.out index 
892561e20eb..f3c5ccea855 100644 --- a/tsl/test/expected/dist_move_chunk.out +++ b/tsl/test/expected/dist_move_chunk.out @@ -12,6 +12,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ----------------------+----------------------+--------------+------------------+------------------- db_dist_move_chunk_1 | db_dist_move_chunk_1 | t | t | t @@ -25,6 +28,7 @@ GRANT CREATE ON SCHEMA public TO :ROLE_1; SET ROLE :ROLE_1; CREATE TABLE dist_test(time timestamp NOT NULL, device int, temp float); SELECT create_distributed_hypertable('dist_test', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_distributed_hypertable ------------------------------- @@ -42,6 +46,7 @@ SELECT * from show_chunks('dist_test'); (4 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_move_chunk_1]: SELECT * from show_chunks('dist_test') NOTICE: [db_dist_move_chunk_1]: show_chunks @@ -79,6 +84,7 @@ SELECT sum(device) FROM dist_test; (1 row) SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_move_chunk_1]: SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk NOTICE: [db_dist_move_chunk_1]: sum @@ -172,6 +178,7 @@ ERROR: chunk "_dist_hyper_1_2_chunk" does not exist on source data node "db_dis -- do actualy copy CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> :'DATA_NODE_1', destination_node => :'DATA_NODE_2'); SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_move_chunk_1]: SELECT * from show_chunks('dist_test') NOTICE: [db_dist_move_chunk_1]: show_chunks @@ -204,6 +211,7 @@ _timescaledb_internal._dist_hyper_1_3_chunk (1 row) SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_2'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_move_chunk_2]: SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk NOTICE: [db_dist_move_chunk_2]: sum @@ -225,6 +233,7 @@ ERROR: chunk "_dist_hyper_1_1_chunk" already exists on destination data node "d -- now try to move the same chunk from data node 2 to 3 CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> :'DATA_NODE_2', destination_node => :'DATA_NODE_3'); SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_move_chunk_1]: SELECT * from show_chunks('dist_test') NOTICE: [db_dist_move_chunk_1]: show_chunks @@ -257,6 +266,7 @@ _timescaledb_internal._dist_hyper_1_1_chunk (1 row) SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_3'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_move_chunk_3]: SELECT 
sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk NOTICE: [db_dist_move_chunk_3]: sum @@ -322,6 +332,7 @@ DROP TABLE dist_test; -- Create a compressed hypertable CREATE TABLE dist_test(time timestamp NOT NULL, device int, temp float); SELECT create_distributed_hypertable('dist_test', 'time', 'device', 3); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_distributed_hypertable ------------------------------- @@ -348,6 +359,7 @@ SELECT * from show_chunks('dist_test'); (4 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_move_chunk_1]: SELECT * from show_chunks('dist_test') NOTICE: [db_dist_move_chunk_1]: show_chunks @@ -507,6 +519,7 @@ SELECT * FROM show_chunks('dist_test'); (4 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_move_chunk_1]: SELECT * from show_chunks('dist_test') NOTICE: [db_dist_move_chunk_1]: show_chunks diff --git a/tsl/test/expected/dist_param.out b/tsl/test/expected/dist_param.out index 564a15705a4..e3ddaec1875 100644 --- a/tsl/test/expected/dist_param.out +++ b/tsl/test/expected/dist_param.out @@ -25,6 +25,7 @@ create or replace function mix(x float4) returns float4 as $$ select ((hashfloat -- distributed hypertable create table metric_dist(ts timestamptz, id int, value float); select create_distributed_hypertable('metric_dist', 'ts', 'id'); +WARNING: distributed hypertable is deprecated WARNING: only one data node was assigned to the hypertable NOTICE: adding not-null constraint to column "ts" create_distributed_hypertable @@ -511,14 +512,14 @@ limit 1 Output: metric_dist.id, metric_dist.ts, metric_dist.value -> Unique Output: metric_dist.id, metric_dist.ts, metric_dist.value - -> Incremental Sort + -> Sort Output: metric_dist.id, metric_dist.ts, metric_dist.value Sort Key: metric_dist.id, metric_dist.ts, metric_dist.value - Presorted Key: metric_dist.id -> Nested Loop Output: metric_dist.id, metric_dist.ts, metric_dist.value - -> Index Scan using metric_name_id on public.metric_name + -> Index Scan using metric_name_name on public.metric_name Output: metric_name.id, metric_name.name + Index Cond: ((metric_name.name >= 'cpu'::text) AND (metric_name.name < 'cpv'::text)) Filter: (metric_name.name ~~ 'cpu%'::text) -> Custom Scan (DataNodeScan) on public.metric_dist Output: metric_dist.id, metric_dist.ts, metric_dist.value @@ -556,10 +557,9 @@ limit 1 Output: metric_name.name, metric_dist.ts, metric_dist.value -> Unique Output: metric_name.name, metric_dist.ts, metric_dist.value - -> Incremental Sort + -> Sort Output: metric_name.name, metric_dist.ts, metric_dist.value Sort Key: metric_name.name COLLATE "C", metric_dist.ts, metric_dist.value - Presorted Key: metric_name.name -> Nested Loop Output: metric_name.name, metric_dist.ts, metric_dist.value -> Index Scan using metric_name_name on public.metric_name @@ -571,7 +571,7 @@ limit 1 Data node: data_node_1 Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_16_chunk, _dist_hyper_1_20_chunk, _dist_hyper_1_37_chunk, _dist_hyper_1_52_chunk Remote SQL: SELECT ts, id, value FROM public.metric_dist WHERE _timescaledb_functions.chunks_in(public.metric_dist.*, ARRAY[3, 16, 20, 37, 52]) AND ((ts >= '2022-02-01 15:02:02-08'::timestamp with time zone)) AND ((ts <= '2022-03-02 15:02:02-08'::timestamp with time zone)) 
AND (($1::integer = id)) -(19 rows) +(18 rows) -- If there are a lot of rows chosen from the local table, the parameterized -- nested loop might download the entire dist table or even more than that (in @@ -674,7 +674,7 @@ group by id -> Nested Loop Output: metric_location.id, metric_name.id Inner Unique: true - Join Filter: (metric_name.id = metric_location.id) + Join Filter: (metric_location.id = metric_name.id) -> Seq Scan on public.metric_location Output: metric_location.id, metric_location.location Filter: texteq(metric_location.location, 'Yerevan'::text) @@ -709,7 +709,7 @@ order by 1 Output: metric_dist.id, metric_dist.value -> Nested Loop Output: max_value_times.ts, max_value_times.id, metric_name.id - Join Filter: (metric_name.id = max_value_times.id) + Join Filter: (max_value_times.id = metric_name.id) -> Index Scan using metric_name_id on public.metric_name Output: metric_name.id, metric_name.name Filter: (metric_name.name ~~ 'cpu%'::text) diff --git a/tsl/test/expected/dist_partial_agg-13.out b/tsl/test/expected/dist_partial_agg-13.out index 01c9901480d..53e6b319e85 100644 --- a/tsl/test/expected/dist_partial_agg-13.out +++ b/tsl/test/expected/dist_partial_agg-13.out @@ -50,6 +50,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------------+-----------------------+--------------+------------------+------------------- db_dist_partial_agg_1 | db_dist_partial_agg_1 | t | t | t @@ -62,6 +65,7 @@ SELECT * FROM test.remote_exec('{ db_dist_partial_agg_1, db_dist_partial_agg_2, $$ CREATE TYPE custom_type AS (high int, low int); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_partial_agg_1]: CREATE TYPE custom_type AS (high int, low int) NOTICE: [db_dist_partial_agg_2]: @@ -84,6 +88,7 @@ ALTER DATABASE :DATA_NODE_2 SET enable_partitionwise_aggregate TO true; ALTER DATABASE :DATA_NODE_3 SET enable_partitionwise_aggregate TO true; SET ROLE :ROLE_1; SELECT table_name FROM create_distributed_hypertable( 'conditions', 'timec', 'location', 3, chunk_time_interval => INTERVAL '1 day'); +WARNING: distributed hypertable is deprecated table_name ------------ conditions diff --git a/tsl/test/expected/dist_partial_agg-14.out b/tsl/test/expected/dist_partial_agg-14.out index 01c9901480d..53e6b319e85 100644 --- a/tsl/test/expected/dist_partial_agg-14.out +++ b/tsl/test/expected/dist_partial_agg-14.out @@ -50,6 +50,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------------+-----------------------+--------------+------------------+------------------- db_dist_partial_agg_1 | db_dist_partial_agg_1 | t | t | t @@ -62,6 +65,7 @@ SELECT * FROM test.remote_exec('{ db_dist_partial_agg_1, db_dist_partial_agg_2, $$ CREATE TYPE custom_type AS (high int, low int); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_partial_agg_1]: CREATE TYPE custom_type AS (high int, low int) NOTICE: [db_dist_partial_agg_2]: @@ -84,6 +88,7 @@ ALTER 
DATABASE :DATA_NODE_2 SET enable_partitionwise_aggregate TO true; ALTER DATABASE :DATA_NODE_3 SET enable_partitionwise_aggregate TO true; SET ROLE :ROLE_1; SELECT table_name FROM create_distributed_hypertable( 'conditions', 'timec', 'location', 3, chunk_time_interval => INTERVAL '1 day'); +WARNING: distributed hypertable is deprecated table_name ------------ conditions diff --git a/tsl/test/expected/dist_partial_agg-15.out b/tsl/test/expected/dist_partial_agg-15.out index 39449b18398..6a1d81c2e7b 100644 --- a/tsl/test/expected/dist_partial_agg-15.out +++ b/tsl/test/expected/dist_partial_agg-15.out @@ -50,6 +50,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------------+-----------------------+--------------+------------------+------------------- db_dist_partial_agg_1 | db_dist_partial_agg_1 | t | t | t @@ -62,6 +65,7 @@ SELECT * FROM test.remote_exec('{ db_dist_partial_agg_1, db_dist_partial_agg_2, $$ CREATE TYPE custom_type AS (high int, low int); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_partial_agg_1]: CREATE TYPE custom_type AS (high int, low int) NOTICE: [db_dist_partial_agg_2]: @@ -84,6 +88,7 @@ ALTER DATABASE :DATA_NODE_2 SET enable_partitionwise_aggregate TO true; ALTER DATABASE :DATA_NODE_3 SET enable_partitionwise_aggregate TO true; SET ROLE :ROLE_1; SELECT table_name FROM create_distributed_hypertable( 'conditions', 'timec', 'location', 3, chunk_time_interval => INTERVAL '1 day'); +WARNING: distributed hypertable is deprecated table_name ------------ conditions diff --git a/tsl/test/expected/dist_policy.out b/tsl/test/expected/dist_policy.out index b08fd76868e..4bcfd8b7cf1 100644 --- a/tsl/test/expected/dist_policy.out +++ b/tsl/test/expected/dist_policy.out @@ -13,6 +13,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ------------------+------------------+--------------+------------------+------------------- db_dist_policy_1 | db_dist_policy_1 | t | t | t @@ -41,6 +44,7 @@ SELECT * FROM test.remote_exec(NULL, $$ STABLE AS 'SELECT time FROM time_table'; GRANT ALL ON TABLE time_table TO PUBLIC; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_policy_1]: CREATE TABLE time_table (time BIGINT) NOTICE: [db_dist_policy_1]: @@ -87,6 +91,7 @@ CREATE TABLE conditions( ); SELECT * FROM create_distributed_hypertable('conditions', 'time', 'device', 3, chunk_time_interval => 5); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 1 | public | conditions | t @@ -139,6 +144,7 @@ SELECT show_chunks('conditions'); (27 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT show_chunks('conditions'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_policy_1]: SELECT show_chunks('conditions') NOTICE: [db_dist_policy_1]: show_chunks @@ -194,6 +200,7 @@ _timescaledb_internal._dist_hyper_1_27_chunk UPDATE 
time_table SET time = 20; SELECT * FROM test.remote_exec(NULL, $$ UPDATE time_table SET time = 20; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_policy_1]: UPDATE time_table SET time = 20 NOTICE: [db_dist_policy_2]: UPDATE time_table SET time = 20 NOTICE: [db_dist_policy_3]: UPDATE time_table SET time = 20 @@ -227,6 +234,7 @@ SELECT show_chunks('conditions'); (18 rows) SELECT * FROM test.remote_exec(NULL, $$ SELECT show_chunks('conditions'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_policy_1]: SELECT show_chunks('conditions') NOTICE: [db_dist_policy_1]: show_chunks diff --git a/tsl/test/expected/dist_query-13.out b/tsl/test/expected/dist_query-13.out index 78e0493e7c2..88f82b1f5ae 100644 --- a/tsl/test/expected/dist_query-13.out +++ b/tsl/test/expected/dist_query-13.out @@ -43,6 +43,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +psql:include/dist_query_load.sql:17: WARNING: adding data node is deprecated +psql:include/dist_query_load.sql:17: WARNING: adding data node is deprecated +psql:include/dist_query_load.sql:17: WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------+-----------------+--------------+------------------+------------------- db_dist_query_1 | db_dist_query_1 | t | t | t @@ -61,12 +64,14 @@ CREATE TABLE hyper (LIKE reference); CREATE TABLE hyper1d (LIKE reference); SELECT create_distributed_hypertable('hyper', 'time', 'device', 3, chunk_time_interval => interval '18 hours'); +psql:include/dist_query_load.sql:30: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (1,public,hyper,t) (1 row) SELECT create_distributed_hypertable('hyper1d', 'time', chunk_time_interval => interval '36 hours'); +psql:include/dist_query_load.sql:32: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (2,public,hyper1d,t) diff --git a/tsl/test/expected/dist_query-14.out b/tsl/test/expected/dist_query-14.out index 78e0493e7c2..88f82b1f5ae 100644 --- a/tsl/test/expected/dist_query-14.out +++ b/tsl/test/expected/dist_query-14.out @@ -43,6 +43,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +psql:include/dist_query_load.sql:17: WARNING: adding data node is deprecated +psql:include/dist_query_load.sql:17: WARNING: adding data node is deprecated +psql:include/dist_query_load.sql:17: WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------+-----------------+--------------+------------------+------------------- db_dist_query_1 | db_dist_query_1 | t | t | t @@ -61,12 +64,14 @@ CREATE TABLE hyper (LIKE reference); CREATE TABLE hyper1d (LIKE reference); SELECT create_distributed_hypertable('hyper', 'time', 'device', 3, chunk_time_interval => interval '18 hours'); +psql:include/dist_query_load.sql:30: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (1,public,hyper,t) (1 row) SELECT create_distributed_hypertable('hyper1d', 'time', chunk_time_interval => interval '36 hours'); +psql:include/dist_query_load.sql:32: WARNING: distributed hypertable is deprecated create_distributed_hypertable 
------------------------------- (2,public,hyper1d,t) diff --git a/tsl/test/expected/dist_query-15.out b/tsl/test/expected/dist_query-15.out index 7a4e62a40ea..ec70d3a90fa 100644 --- a/tsl/test/expected/dist_query-15.out +++ b/tsl/test/expected/dist_query-15.out @@ -43,6 +43,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +psql:include/dist_query_load.sql:17: WARNING: adding data node is deprecated +psql:include/dist_query_load.sql:17: WARNING: adding data node is deprecated +psql:include/dist_query_load.sql:17: WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------+-----------------+--------------+------------------+------------------- db_dist_query_1 | db_dist_query_1 | t | t | t @@ -61,12 +64,14 @@ CREATE TABLE hyper (LIKE reference); CREATE TABLE hyper1d (LIKE reference); SELECT create_distributed_hypertable('hyper', 'time', 'device', 3, chunk_time_interval => interval '18 hours'); +psql:include/dist_query_load.sql:30: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (1,public,hyper,t) (1 row) SELECT create_distributed_hypertable('hyper1d', 'time', chunk_time_interval => interval '36 hours'); +psql:include/dist_query_load.sql:32: WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (2,public,hyper1d,t) diff --git a/tsl/test/expected/dist_ref_table_join-13.out b/tsl/test/expected/dist_ref_table_join-13.out index 41268acea0a..f26370a42b6 100644 --- a/tsl/test/expected/dist_ref_table_join-13.out +++ b/tsl/test/expected/dist_ref_table_join-13.out @@ -12,6 +12,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created --------------------------+--------------------------+--------------+------------------+------------------- db_dist_ref_table_join_1 | db_dist_ref_table_join_1 | t | t | t @@ -33,6 +36,7 @@ drop table if exists metric; NOTICE: table "metric" does not exist, skipping CREATE table metric(ts timestamptz, id int, value float); SELECT create_distributed_hypertable('metric', 'ts', 'id'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "ts" create_distributed_hypertable ------------------------------- @@ -53,6 +57,7 @@ CALL distributed_exec($$INSERT into metric_name values (2, 'cpu2');$$); -- The reference table as DHT CREATE TABLE metric_name_dht(id BIGSERIAL, name text); SELECT create_distributed_hypertable('metric_name_dht', 'id', chunk_time_interval => 9223372036854775807, replication_factor => 3); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (2,public,metric_name_dht,t) @@ -66,6 +71,7 @@ INSERT into metric_name_local values (1, 'cpu1'); INSERT into metric_name_local values (2, 'cpu2'); CREATE table reference_table2(id int primary key, name text); SELECT create_distributed_hypertable('reference_table2', 'id', chunk_time_interval => 2147483647, replication_factor => 3); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- 
(3,public,reference_table2,t) diff --git a/tsl/test/expected/dist_ref_table_join-14.out b/tsl/test/expected/dist_ref_table_join-14.out index 41268acea0a..f26370a42b6 100644 --- a/tsl/test/expected/dist_ref_table_join-14.out +++ b/tsl/test/expected/dist_ref_table_join-14.out @@ -12,6 +12,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created --------------------------+--------------------------+--------------+------------------+------------------- db_dist_ref_table_join_1 | db_dist_ref_table_join_1 | t | t | t @@ -33,6 +36,7 @@ drop table if exists metric; NOTICE: table "metric" does not exist, skipping CREATE table metric(ts timestamptz, id int, value float); SELECT create_distributed_hypertable('metric', 'ts', 'id'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "ts" create_distributed_hypertable ------------------------------- @@ -53,6 +57,7 @@ CALL distributed_exec($$INSERT into metric_name values (2, 'cpu2');$$); -- The reference table as DHT CREATE TABLE metric_name_dht(id BIGSERIAL, name text); SELECT create_distributed_hypertable('metric_name_dht', 'id', chunk_time_interval => 9223372036854775807, replication_factor => 3); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (2,public,metric_name_dht,t) @@ -66,6 +71,7 @@ INSERT into metric_name_local values (1, 'cpu1'); INSERT into metric_name_local values (2, 'cpu2'); CREATE table reference_table2(id int primary key, name text); SELECT create_distributed_hypertable('reference_table2', 'id', chunk_time_interval => 2147483647, replication_factor => 3); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (3,public,reference_table2,t) diff --git a/tsl/test/expected/dist_ref_table_join-15.out b/tsl/test/expected/dist_ref_table_join-15.out index f0fc68ad5b9..0ae8d29edaa 100644 --- a/tsl/test/expected/dist_ref_table_join-15.out +++ b/tsl/test/expected/dist_ref_table_join-15.out @@ -12,6 +12,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created --------------------------+--------------------------+--------------+------------------+------------------- db_dist_ref_table_join_1 | db_dist_ref_table_join_1 | t | t | t @@ -33,6 +36,7 @@ drop table if exists metric; NOTICE: table "metric" does not exist, skipping CREATE table metric(ts timestamptz, id int, value float); SELECT create_distributed_hypertable('metric', 'ts', 'id'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "ts" create_distributed_hypertable ------------------------------- @@ -53,6 +57,7 @@ CALL distributed_exec($$INSERT into metric_name values (2, 'cpu2');$$); -- The reference table as DHT CREATE TABLE metric_name_dht(id BIGSERIAL, name text); SELECT create_distributed_hypertable('metric_name_dht', 'id', chunk_time_interval => 9223372036854775807, replication_factor => 3); 
+WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (2,public,metric_name_dht,t) @@ -66,6 +71,7 @@ INSERT into metric_name_local values (1, 'cpu1'); INSERT into metric_name_local values (2, 'cpu2'); CREATE table reference_table2(id int primary key, name text); SELECT create_distributed_hypertable('reference_table2', 'id', chunk_time_interval => 2147483647, replication_factor => 3); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (3,public,reference_table2,t) diff --git a/tsl/test/expected/dist_remote_error-14.out b/tsl/test/expected/dist_remote_error-14.out index 22fa4256367..c9feb05a642 100644 --- a/tsl/test/expected/dist_remote_error-14.out +++ b/tsl/test/expected/dist_remote_error-14.out @@ -14,6 +14,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ------------------------+------------------------+--------------+------------------+------------------- db_dist_remote_error_1 | db_dist_remote_error_1 | t | t | t diff --git a/tsl/test/expected/dist_remote_error-15.out b/tsl/test/expected/dist_remote_error-15.out index c32fbd0e2e3..e1c4239193b 100644 --- a/tsl/test/expected/dist_remote_error-15.out +++ b/tsl/test/expected/dist_remote_error-15.out @@ -14,6 +14,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ------------------------+------------------------+--------------+------------------+------------------- db_dist_remote_error_1 | db_dist_remote_error_1 | t | t | t diff --git a/tsl/test/expected/dist_triggers.out b/tsl/test/expected/dist_triggers.out index 96651c1c47a..e7497ebf889 100644 --- a/tsl/test/expected/dist_triggers.out +++ b/tsl/test/expected/dist_triggers.out @@ -19,6 +19,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created --------------------+--------------------+--------------+------------------+------------------- db_dist_triggers_1 | db_dist_triggers_1 | t | t | t @@ -93,6 +96,7 @@ CREATE TRIGGER z_test_trigger_all_after --- Create some triggers before we turn the table into a distributed --- hypertable and some triggers after so that we test both cases. 
SELECT * FROM create_distributed_hypertable('hyper', 'time', 'device_id', 3, chunk_time_interval => 10, data_nodes => ARRAY[:'DATA_NODE_1', :'DATA_NODE_2']); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 1 | public | hyper | t @@ -198,6 +202,7 @@ FROM trigger_events GROUP BY 1,2,3,4 ORDER BY 1,2,3,4; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_triggers_1]: SELECT tg_when, tg_level, tg_op, tg_name, count(*) FROM trigger_events @@ -260,6 +265,7 @@ FROM trigger_events GROUP BY 1,2,3,4 ORDER BY 1,2,3,4; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_triggers_1]: SELECT tg_when, tg_level, tg_op, tg_name, count(*) FROM trigger_events @@ -306,6 +312,7 @@ BEFORE |ROW |UPDATE|z_test_trigger_all | 2 -- Attach a new data node and show that the hypertable is created on -- the node, including its triggers. SELECT attach_data_node(:'DATA_NODE_3', 'hyper'); +WARNING: attaching data node is deprecated attach_data_node -------------------------- (1,1,db_dist_triggers_3) @@ -325,6 +332,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_3'], $$ SELECT test.show_triggers('hyper'); $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_triggers_3]: SELECT test.show_triggers('hyper') NOTICE: [db_dist_triggers_3]: @@ -364,6 +372,7 @@ FROM trigger_events GROUP BY 1,2,3,4 ORDER BY 1,2,3,4; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_triggers_3]: SELECT tg_when, tg_level, tg_op, tg_name, count(*) FROM trigger_events @@ -407,6 +416,7 @@ SELECT * FROM test.remote_exec(ARRAY[:'DATA_NODE_1', :'DATA_NODE_2', :'DATA_NODE SELECT st."Child" as chunk_relid, test.show_triggers((st)."Child") FROM test.show_subtables('hyper') st; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_triggers_1]: SELECT st."Child" as chunk_relid, test.show_triggers((st)."Child") FROM test.show_subtables('hyper') st @@ -450,6 +460,7 @@ CREATE TABLE disttable( temp_c float ); SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device'); +WARNING: distributed hypertable is deprecated hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- 2 | public | disttable | t @@ -487,6 +498,7 @@ CREATE TRIGGER _0_temp_increment FOR EACH ROW EXECUTE FUNCTION temp_increment_trigger(); -- Show that the trigger exists on a data node SELECT test.remote_exec(ARRAY[:'DATA_NODE_3'], $$ SELECT test.show_triggers('disttable') $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_triggers_3]: SELECT test.show_triggers('disttable') NOTICE: [db_dist_triggers_3]: show_triggers @@ -637,6 +649,7 @@ SELECT * FROM test.remote_exec(NULL, $$ SELECT st."Child" as chunk_relid, test.show_triggers((st)."Child") FROM test.show_subtables('disttable') st; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_triggers_1]: SELECT st."Child" as chunk_relid, test.show_triggers((st)."Child") FROM test.show_subtables('disttable') st @@ -681,6 +694,7 @@ SELECT * FROM test.remote_exec(NULL, $$ SELECT st."Child" as chunk_relid, test.show_triggers((st)."Child") FROM test.show_subtables('disttable') st; $$); +WARNING: executing remote command is deprecated NOTICE: [db_dist_triggers_1]: SELECT st."Child" as chunk_relid, test.show_triggers((st)."Child") FROM test.show_subtables('disttable') st diff --git a/tsl/test/expected/dist_util.out b/tsl/test/expected/dist_util.out index 
facec095a1d..e63f29727af 100644 --- a/tsl/test/expected/dist_util.out +++ b/tsl/test/expected/dist_util.out @@ -123,35 +123,43 @@ SET client_min_messages TO NOTICE; -- Adding frontend as backend to a different frontend should fail \c frontend_1 :ROLE_CLUSTER_SUPERUSER SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'frontend_2', bootstrap => true); +WARNING: adding data node is deprecated NOTICE: database "frontend_2" already exists on data node, skipping NOTICE: extension "timescaledb" already exists on data node, skipping ERROR: cannot add "invalid_data_node" as a data node SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'frontend_2', bootstrap => false); +WARNING: adding data node is deprecated ERROR: cannot add "invalid_data_node" as a data node ---------------------------------------------------------------- -- Adding backend from a different group as a backend should fail \c frontend_1 :ROLE_CLUSTER_SUPERUSER SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'backend_2_1', bootstrap => true); +WARNING: adding data node is deprecated NOTICE: database "backend_2_1" already exists on data node, skipping NOTICE: extension "timescaledb" already exists on data node, skipping ERROR: cannot add "invalid_data_node" as a data node SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'backend_2_1', bootstrap => false); +WARNING: adding data node is deprecated ERROR: cannot add "invalid_data_node" as a data node ---------------------------------------------------------------- -- Adding a valid backend target but to an existing backend should fail \c backend_1_1 :ROLE_CLUSTER_SUPERUSER SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'backend_2_1', bootstrap => true); +WARNING: adding data node is deprecated ERROR: unable to assign data nodes from an existing distributed database SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'backend_2_1', bootstrap => false); +WARNING: adding data node is deprecated ERROR: unable to assign data nodes from an existing distributed database ---------------------------------------------------------------- -- Adding a frontend (frontend 1) as a backend to a nondistributed node (TEST_DBNAME) should fail \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'frontend_1', bootstrap => true); +WARNING: adding data node is deprecated NOTICE: database "frontend_1" already exists on data node, skipping NOTICE: extension "timescaledb" already exists on data node, skipping ERROR: cannot add "invalid_data_node" as a data node SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'frontend_1', bootstrap => false); +WARNING: adding data node is deprecated ERROR: cannot add "invalid_data_node" as a data node \set ON_ERROR_STOP 1 ---------------------------------------------------------------- @@ -160,6 +168,7 @@ ERROR: cannot add "invalid_data_node" as a data node \c frontend_1 :ROLE_CLUSTER_SUPERUSER SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_2', host => 'localhost', database => 'backend_x_2', bootstrap => true); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+-------------+--------------+------------------+------------------- data_node_2 | 
backend_x_2 | t | t | t @@ -176,6 +185,7 @@ SELECT key, value FROM _timescaledb_catalog.metadata WHERE key LIKE 'dist_uuid'; -- Now remove a backend from this distributed database to add it to the other cluster \c frontend_1 :ROLE_CLUSTER_SUPERUSER SELECT * FROM delete_data_node('data_node_2'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -196,6 +206,7 @@ DELETE FROM _timescaledb_catalog.metadata WHERE key = 'dist_uuid'; \c frontend_2 :ROLE_CLUSTER_SUPERUSER SELECT node_name, database, node_created, database_created, extension_created FROM add_data_node('data_node_2', host => 'localhost', database => 'backend_x_2', bootstrap => false); +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -------------+-------------+--------------+------------------+------------------- data_node_2 | backend_x_2 | t | f | f @@ -221,6 +232,7 @@ NOTICE: adding not-null constraint to column "time" (1 row) SELECT * FROM create_distributed_hypertable('disttable', 'time', create_default_indexes => false); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- diff --git a/tsl/test/expected/dist_views.out b/tsl/test/expected/dist_views.out index 3a7a5be8117..469b383c4f2 100644 --- a/tsl/test/expected/dist_views.out +++ b/tsl/test/expected/dist_views.out @@ -13,6 +13,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created -----------------+-----------------+--------------+------------------+------------------- db_dist_views_1 | db_dist_views_1 | t | t | t @@ -32,6 +35,7 @@ SELECT setseed(1); CREATE TABLE dist_table(time timestamptz NOT NULL, device int, temp float, timedim date NOT NULL); SELECT create_distributed_hypertable('dist_table', 'time', 'device', replication_factor => 2); +WARNING: distributed hypertable is deprecated create_distributed_hypertable ------------------------------- (1,public,dist_table,t) @@ -120,6 +124,7 @@ ORDER BY node_name;; ---tables with special characters in the name ---- CREATE TABLE "quote'tab" ( a timestamp, b integer); SELECT create_distributed_hypertable( '"quote''tab"', 'a', 'b', replication_factor=>2, chunk_time_interval=>INTERVAL '1 day'); +WARNING: distributed hypertable is deprecated WARNING: column type "timestamp without time zone" used for "a" does not follow best practices NOTICE: adding not-null constraint to column "a" create_distributed_hypertable diff --git a/tsl/test/expected/exp_cagg_monthly.out b/tsl/test/expected/exp_cagg_monthly.out index 9ac5062e4d3..2ca5eaa9e9d 100644 --- a/tsl/test/expected/exp_cagg_monthly.out +++ b/tsl/test/expected/exp_cagg_monthly.out @@ -1027,179 +1027,6 @@ SELECT * FROM conditions_large_1y ORDER BY bucket; (11 rows) RESET timescaledb.materializations_per_refresh_window; --- Test caggs with monthly buckets on top of distributed hypertable -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -\set DATA_NODE_3 :TEST_DBNAME _3 -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, 
host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) -) a; - node_name | database | node_created | database_created | extension_created ------------------------+-----------------------+--------------+------------------+------------------- - db_exp_cagg_monthly_1 | db_exp_cagg_monthly_1 | t | t | t - db_exp_cagg_monthly_2 | db_exp_cagg_monthly_2 | t | t | t - db_exp_cagg_monthly_3 | db_exp_cagg_monthly_3 | t | t | t -(3 rows) - -GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; --- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes -GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; -SET ROLE :ROLE_DEFAULT_PERM_USER; -CREATE TABLE conditions_dist( - day DATE NOT NULL, - temperature INT NOT NULL); -SELECT table_name FROM create_distributed_hypertable('conditions_dist', 'day', chunk_time_interval => INTERVAL '1 day'); - table_name ------------------ - conditions_dist -(1 row) - -INSERT INTO conditions_dist(day, temperature) -SELECT ts, date_part('month', ts)*100 + date_part('day', ts) -FROM generate_series('2010-01-01' :: date, '2010-03-01' :: date - interval '1 day', '1 day') as ts; -CREATE MATERIALIZED VIEW conditions_dist_1m -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - timescaledb_experimental.time_bucket_ng('1 month', day) AS bucket, - MIN(temperature), - MAX(temperature) -FROM conditions_dist -GROUP BY bucket; -NOTICE: refreshing continuous aggregate "conditions_dist_1m" -SELECT mat_hypertable_id AS cagg_id -FROM _timescaledb_catalog.continuous_agg -WHERE user_view_name = 'conditions_dist_1m' -\gset -SELECT raw_hypertable_id AS ht_id -FROM _timescaledb_catalog.continuous_agg -WHERE user_view_name = 'conditions_dist_1m' -\gset -SELECT bucket_width -FROM _timescaledb_catalog.continuous_agg -WHERE mat_hypertable_id = :cagg_id; - bucket_width --------------- - -1 -(1 row) - -SELECT experimental, name, bucket_width, origin, timezone -FROM _timescaledb_catalog.continuous_aggs_bucket_function -WHERE mat_hypertable_id = :cagg_id; - experimental | name | bucket_width | origin | timezone ---------------+----------------+--------------+--------+---------- - t | time_bucket_ng | @ 1 mon | | -(1 row) - -SELECT * FROM conditions_dist_1m ORDER BY bucket; - bucket | min | max -------------+-----+----- - 01-01-2010 | 101 | 131 - 02-01-2010 | 201 | 228 -(2 rows) - --- Same test but with non-realtime, NO DATA aggregate and manual refresh -CREATE MATERIALIZED VIEW conditions_dist_1m_manual -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - timescaledb_experimental.time_bucket_ng('1 month', day) AS bucket, - MIN(temperature), - MAX(temperature) -FROM conditions_dist -GROUP BY bucket -WITH NO DATA; -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - bucket | min | max ---------+-----+----- -(0 rows) - -CALL refresh_continuous_aggregate('conditions_dist_1m_manual', '2010-01-01', '2010-03-01'); -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - bucket | min | max -------------+-----+----- - 01-01-2010 | 101 | 131 - 02-01-2010 | 201 | 228 -(2 rows) - --- Check invalidation for caggs on top of distributed hypertable -INSERT INTO conditions_dist(day, temperature) -VALUES ('2010-01-15', 999), ('2010-02-15', -999), ('2010-03-01', 15); -SELECT * FROM conditions_dist_1m ORDER BY bucket; - bucket | min | max -------------+-----+----- - 01-01-2010 | 101 | 131 - 02-01-2010 | 201 | 228 - 
03-01-2010 | 15 | 15 -(3 rows) - -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - bucket | min | max -------------+-----+----- - 01-01-2010 | 101 | 131 - 02-01-2010 | 201 | 228 -(2 rows) - -CALL refresh_continuous_aggregate('conditions_dist_1m', '2010-01-01', '2010-04-01'); -SELECT * FROM conditions_dist_1m ORDER BY bucket; - bucket | min | max -------------+------+----- - 01-01-2010 | 101 | 999 - 02-01-2010 | -999 | 228 - 03-01-2010 | 15 | 15 -(3 rows) - -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - bucket | min | max -------------+-----+----- - 01-01-2010 | 101 | 131 - 02-01-2010 | 201 | 228 -(2 rows) - -CALL refresh_continuous_aggregate('conditions_dist_1m_manual', '2010-01-01', '2010-04-01'); -SELECT * FROM conditions_dist_1m ORDER BY bucket; - bucket | min | max -------------+------+----- - 01-01-2010 | 101 | 999 - 02-01-2010 | -999 | 228 - 03-01-2010 | 15 | 15 -(3 rows) - -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - bucket | min | max -------------+------+----- - 01-01-2010 | 101 | 999 - 02-01-2010 | -999 | 228 - 03-01-2010 | 15 | 15 -(3 rows) - -ALTER MATERIALIZED VIEW conditions_dist_1m_manual SET ( timescaledb.compress ); -NOTICE: defaulting compress_orderby to bucket -SELECT compress_chunk(ch) -FROM show_chunks('conditions_dist_1m_manual') ch limit 1; - compress_chunk -------------------------------------------- - _timescaledb_internal._hyper_17_533_chunk -(1 row) - -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - bucket | min | max -------------+------+----- - 01-01-2010 | 101 | 999 - 02-01-2010 | -999 | 228 - 03-01-2010 | 15 | 15 -(3 rows) - --- Clean up -DROP TABLE conditions_dist CASCADE; -NOTICE: drop cascades to 5 other objects -NOTICE: drop cascades to 3 other objects -NOTICE: drop cascades to 3 other objects -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); -DROP DATABASE :DATA_NODE_3 WITH (FORCE); -- Test the specific code path of creating a CAGG on top of empty hypertable. 
CREATE TABLE conditions_empty( day DATE NOT NULL, @@ -1211,7 +1038,7 @@ SELECT create_hypertable( ); create_hypertable -------------------------------- - (19,public,conditions_empty,t) + (15,public,conditions_empty,t) (1 row) CREATE MATERIALIZED VIEW conditions_summary_empty @@ -1268,7 +1095,7 @@ SELECT create_hypertable( ); create_hypertable --------------------------------- - (21,public,conditions_policy,t) + (17,public,conditions_policy,t) (1 row) INSERT INTO conditions_policy (day, city, temperature) VALUES diff --git a/tsl/test/expected/exp_cagg_origin.out b/tsl/test/expected/exp_cagg_origin.out index 6b0c70c3add..c7f2ab6b9be 100644 --- a/tsl/test/expected/exp_cagg_origin.out +++ b/tsl/test/expected/exp_cagg_origin.out @@ -466,183 +466,6 @@ NOTICE: drop cascades to 7 other objects NOTICE: drop cascades to 3 other objects NOTICE: drop cascades to 3 other objects NOTICE: drop cascades to 5 other objects --- Test caggs with monthly buckets and custom origin on top of distributed hypertable -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -\set DATA_NODE_3 :TEST_DBNAME _3 -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) -) a; - node_name | database | node_created | database_created | extension_created -----------------------+----------------------+--------------+------------------+------------------- - db_exp_cagg_origin_1 | db_exp_cagg_origin_1 | t | t | t - db_exp_cagg_origin_2 | db_exp_cagg_origin_2 | t | t | t - db_exp_cagg_origin_3 | db_exp_cagg_origin_3 | t | t | t -(3 rows) - -GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; --- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes -GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; -SET ROLE :ROLE_DEFAULT_PERM_USER; -CREATE TABLE conditions_dist( - day date NOT NULL, - temperature INT NOT NULL); -SELECT table_name FROM create_distributed_hypertable('conditions_dist', 'day', chunk_time_interval => INTERVAL '1 day'); - table_name ------------------ - conditions_dist -(1 row) - -INSERT INTO conditions_dist(day, temperature) -SELECT ts, date_part('month', ts)*100 + date_part('day', ts) -FROM generate_series('2010-01-01' :: date, '2010-03-01' :: date - interval '1 day', '1 day') as ts; -CREATE MATERIALIZED VIEW conditions_dist_1m -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - timescaledb_experimental.time_bucket_ng('1 month', day, '2010-01-01') AS bucket, - MIN(temperature), - MAX(temperature) -FROM conditions_dist -GROUP BY bucket; -NOTICE: refreshing continuous aggregate "conditions_dist_1m" -SELECT mat_hypertable_id AS cagg_id, raw_hypertable_id AS ht_id -FROM _timescaledb_catalog.continuous_agg -WHERE user_view_name = 'conditions_dist_1m' -\gset -SELECT bucket_width -FROM _timescaledb_catalog.continuous_agg -WHERE mat_hypertable_id = :cagg_id; - bucket_width --------------- - -1 -(1 row) - -SELECT experimental, name, bucket_width, origin, timezone -FROM _timescaledb_catalog.continuous_aggs_bucket_function -WHERE mat_hypertable_id = :cagg_id; - experimental | name | bucket_width | origin | timezone ---------------+----------------+--------------+--------------------------+---------- - t | time_bucket_ng | @ 1 mon | Fri Jan 01 00:00:00 2010 | -(1 row) - -SELECT * FROM 
conditions_dist_1m ORDER BY bucket; - bucket | min | max -------------+-----+----- - 01-01-2010 | 101 | 131 - 02-01-2010 | 201 | 228 -(2 rows) - --- Same test but with non-realtime, NO DATA aggregate and manual refresh -CREATE MATERIALIZED VIEW conditions_dist_1m_manual -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - timescaledb_experimental.time_bucket_ng('1 month', day, '2005-01-01') AS bucket, - MIN(temperature), - MAX(temperature) -FROM conditions_dist -GROUP BY bucket -WITH NO DATA; -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - bucket | min | max ---------+-----+----- -(0 rows) - -CALL refresh_continuous_aggregate('conditions_dist_1m_manual', '2010-01-01', '2010-03-01'); -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - bucket | min | max -------------+-----+----- - 01-01-2010 | 101 | 131 - 02-01-2010 | 201 | 228 -(2 rows) - --- Check invalidation for caggs on top of distributed hypertable -INSERT INTO conditions_dist(day, temperature) -VALUES ('2010-01-15', 999), ('2010-02-15', -999), ('2010-03-01', 15); -SELECT * FROM conditions_dist_1m ORDER BY bucket; - bucket | min | max -------------+-----+----- - 01-01-2010 | 101 | 131 - 02-01-2010 | 201 | 228 - 03-01-2010 | 15 | 15 -(3 rows) - -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - bucket | min | max -------------+-----+----- - 01-01-2010 | 101 | 131 - 02-01-2010 | 201 | 228 -(2 rows) - -CALL refresh_continuous_aggregate('conditions_dist_1m', '2010-01-01', '2010-04-01'); -SELECT * FROM conditions_dist_1m ORDER BY bucket; - bucket | min | max -------------+------+----- - 01-01-2010 | 101 | 999 - 02-01-2010 | -999 | 228 - 03-01-2010 | 15 | 15 -(3 rows) - -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - bucket | min | max -------------+-----+----- - 01-01-2010 | 101 | 131 - 02-01-2010 | 201 | 228 -(2 rows) - -CALL refresh_continuous_aggregate('conditions_dist_1m_manual', '2010-01-01', '2010-04-01'); -SELECT * FROM conditions_dist_1m ORDER BY bucket; - bucket | min | max -------------+------+----- - 01-01-2010 | 101 | 999 - 02-01-2010 | -999 | 228 - 03-01-2010 | 15 | 15 -(3 rows) - -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - bucket | min | max -------------+------+----- - 01-01-2010 | 101 | 999 - 02-01-2010 | -999 | 228 - 03-01-2010 | 15 | 15 -(3 rows) - --- Compression on top of distributed hypertables -ALTER MATERIALIZED VIEW conditions_dist_1m_manual SET ( timescaledb.compress ); -NOTICE: defaulting compress_orderby to bucket -SELECT compress_chunk(ch) -FROM show_chunks('conditions_dist_1m_manual') ch limit 1; - compress_chunk ------------------------------------------- - _timescaledb_internal._hyper_8_205_chunk -(1 row) - -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - bucket | min | max -------------+------+----- - 01-01-2010 | 101 | 999 - 02-01-2010 | -999 | 228 - 03-01-2010 | 15 | 15 -(3 rows) - --- Clean up -DROP TABLE conditions_dist CASCADE; -NOTICE: drop cascades to 5 other objects -NOTICE: drop cascades to 3 other objects -NOTICE: drop cascades to 3 other objects -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -SELECT delete_data_node(name) -FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v (name); - delete_data_node ------------------- - t - t - t -(3 rows) - -SET ROLE :ROLE_DEFAULT_PERM_USER; -- Test the specific code path of creating a CAGG on top of empty hypertable. 
CREATE TABLE conditions_empty( day DATE NOT NULL, @@ -652,9 +475,9 @@ SELECT create_hypertable( 'conditions_empty', 'day', chunk_time_interval => INTERVAL '1 day' ); - create_hypertable --------------------------------- - (10,public,conditions_empty,t) + create_hypertable +------------------------------- + (6,public,conditions_empty,t) (1 row) CREATE MATERIALIZED VIEW conditions_summary_empty @@ -703,7 +526,7 @@ ORDER BY month, city; -- Clean up DROP TABLE conditions_empty CASCADE; NOTICE: drop cascades to 2 other objects -NOTICE: drop cascades to table _timescaledb_internal._hyper_11_225_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_7_158_chunk -- Make sure add_continuous_aggregate_policy() works CREATE TABLE conditions_policy( day DATE NOT NULL, @@ -713,9 +536,9 @@ SELECT create_hypertable( 'conditions_policy', 'day', chunk_time_interval => INTERVAL '1 day' ); - create_hypertable ---------------------------------- - (12,public,conditions_policy,t) + create_hypertable +-------------------------------- + (8,public,conditions_policy,t) (1 row) INSERT INTO conditions_policy (day, city, temperature) VALUES @@ -770,7 +593,7 @@ SELECT add_continuous_aggregate_policy('conditions_summary_policy', -- Clean up DROP TABLE conditions_policy CASCADE; NOTICE: drop cascades to 2 other objects -NOTICE: drop cascades to table _timescaledb_internal._hyper_13_240_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_9_173_chunk -- Make sure CAGGs with custom origin work for timestamp type CREATE TABLE conditions_timestamp( tstamp TIMESTAMP NOT NULL, @@ -783,7 +606,7 @@ SELECT create_hypertable( WARNING: column type "timestamp without time zone" used for "tstamp" does not follow best practices create_hypertable ------------------------------------ - (14,public,conditions_timestamp,t) + (10,public,conditions_timestamp,t) (1 row) CREATE MATERIALIZED VIEW conditions_summary_timestamp @@ -845,8 +668,8 @@ ALTER TABLE conditions_timestamp SET ( SELECT compress_chunk(ch) FROM show_chunks('conditions_timestamp') AS ch; compress_chunk ------------------------------------------- - _timescaledb_internal._hyper_14_241_chunk - _timescaledb_internal._hyper_14_243_chunk + _timescaledb_internal._hyper_10_174_chunk + _timescaledb_internal._hyper_10_176_chunk (2 rows) -- New data is seen because the cagg is real-time @@ -896,7 +719,7 @@ SELECT add_continuous_aggregate_policy('conditions_summary_timestamp', DROP TABLE conditions_timestamp CASCADE; NOTICE: drop cascades to 2 other objects NOTICE: drop cascades to 3 other objects -NOTICE: drop cascades to table _timescaledb_internal._hyper_15_242_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_11_175_chunk -- Make sure CAGGs with custom origin work for timestamptz type CREATE TABLE conditions_timestamptz( tstamp TIMESTAMPTZ NOT NULL, @@ -908,7 +731,7 @@ SELECT create_hypertable( ); create_hypertable -------------------------------------- - (17,public,conditions_timestamptz,t) + (13,public,conditions_timestamptz,t) (1 row) -- Add some data to the hypertable and make sure it is visible in the cagg @@ -1059,9 +882,9 @@ ALTER TABLE conditions_timestamptz SET ( SELECT compress_chunk(ch) FROM show_chunks('conditions_timestamptz') AS ch; compress_chunk ------------------------------------------- - _timescaledb_internal._hyper_17_246_chunk - _timescaledb_internal._hyper_17_247_chunk - _timescaledb_internal._hyper_17_249_chunk + _timescaledb_internal._hyper_13_179_chunk + _timescaledb_internal._hyper_13_180_chunk + 
_timescaledb_internal._hyper_13_182_chunk (3 rows) -- New data is seen because the cagg is real-time @@ -1111,8 +934,4 @@ SELECT add_continuous_aggregate_policy('conditions_summary_timestamptz', DROP TABLE conditions_timestamptz CASCADE; NOTICE: drop cascades to 3 other objects NOTICE: drop cascades to 3 other objects -NOTICE: drop cascades to table _timescaledb_internal._hyper_19_248_chunk -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); -DROP DATABASE :DATA_NODE_3 WITH (FORCE); +NOTICE: drop cascades to table _timescaledb_internal._hyper_15_181_chunk diff --git a/tsl/test/expected/exp_cagg_timezone.out b/tsl/test/expected/exp_cagg_timezone.out index a5c207130a8..b139f89d255 100644 --- a/tsl/test/expected/exp_cagg_timezone.out +++ b/tsl/test/expected/exp_cagg_timezone.out @@ -650,202 +650,6 @@ ORDER by month, city; Moscow | 2021-10-02 00:00:00 | 2 | 4 (7 rows) --- Test caggs with monthly buckets on top of distributed hypertable -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -\set DATA_NODE_3 :TEST_DBNAME _3 -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) -) a; - node_name | database | node_created | database_created | extension_created -------------------------+------------------------+--------------+------------------+------------------- - db_exp_cagg_timezone_1 | db_exp_cagg_timezone_1 | t | t | t - db_exp_cagg_timezone_2 | db_exp_cagg_timezone_2 | t | t | t - db_exp_cagg_timezone_3 | db_exp_cagg_timezone_3 | t | t | t -(3 rows) - -GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; --- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes -GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; -SET ROLE :ROLE_DEFAULT_PERM_USER; -CREATE TABLE conditions_dist( - day timestamptz NOT NULL, - temperature INT NOT NULL); -SELECT table_name FROM create_distributed_hypertable('conditions_dist', 'day', chunk_time_interval => INTERVAL '1 day'); - table_name ------------------ - conditions_dist -(1 row) - -INSERT INTO conditions_dist(day, temperature) -SELECT ts, date_part('month', ts)*100 + date_part('day', ts) -FROM generate_series('2010-01-01 00:00:00 MSK' :: timestamptz, '2010-03-01 00:00:00 MSK' :: timestamptz - interval '1 day', '1 day') as ts; -CREATE MATERIALIZED VIEW conditions_dist_1m -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - timescaledb_experimental.time_bucket_ng('1 month', day, 'MSK') AS bucket, - MIN(temperature), - MAX(temperature) -FROM conditions_dist -GROUP BY bucket; -NOTICE: refreshing continuous aggregate "conditions_dist_1m" -SELECT mat_hypertable_id AS cagg_id -FROM _timescaledb_catalog.continuous_agg -WHERE user_view_name = 'conditions_dist_1m' -\gset -SELECT raw_hypertable_id AS ht_id -FROM _timescaledb_catalog.continuous_agg -WHERE user_view_name = 'conditions_dist_1m' -\gset -SELECT bucket_width -FROM _timescaledb_catalog.continuous_agg -WHERE mat_hypertable_id = :cagg_id; - bucket_width --------------- - -1 -(1 row) - -SELECT experimental, name, bucket_width, origin, timezone -FROM _timescaledb_catalog.continuous_aggs_bucket_function -WHERE mat_hypertable_id = :cagg_id; - experimental | name | bucket_width | origin | timezone 
---------------+----------------+--------------+--------+---------- - t | time_bucket_ng | @ 1 mon | | MSK -(1 row) - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m -ORDER BY month; - month | min | max ----------------------+-----+------ - 2010-01-01 00:00:00 | 101 | 1231 - 2010-02-01 00:00:00 | 131 | 227 -(2 rows) - --- Same test but with non-realtime, NO DATA aggregate and manual refresh -CREATE MATERIALIZED VIEW conditions_dist_1m_manual -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - timescaledb_experimental.time_bucket_ng('1 month', day, 'MSK') AS bucket, - MIN(temperature), - MAX(temperature) -FROM conditions_dist -GROUP BY bucket -WITH NO DATA; -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m_manual -ORDER BY month; - month | min | max --------+-----+----- -(0 rows) - -CALL refresh_continuous_aggregate('conditions_dist_1m_manual', '2010-01-01 00:00:00 MSK', '2010-03-01 00:00:00 MSK'); -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m_manual -ORDER BY month; - month | min | max ----------------------+-----+------ - 2010-01-01 00:00:00 | 101 | 1231 - 2010-02-01 00:00:00 | 131 | 227 -(2 rows) - --- Check invalidation for caggs on top of distributed hypertable -INSERT INTO conditions_dist(day, temperature) VALUES -('2010-01-15 00:00:00 MSK', 999), -('2010-02-15 00:00:00 MSK', -999), -('2010-03-01 00:00:00 MSK', 15); -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m -ORDER BY month; - month | min | max ----------------------+-----+------ - 2010-01-01 00:00:00 | 101 | 1231 - 2010-02-01 00:00:00 | 131 | 227 - 2010-03-01 00:00:00 | 15 | 15 -(3 rows) - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m_manual -ORDER BY month; - month | min | max ----------------------+-----+------ - 2010-01-01 00:00:00 | 101 | 1231 - 2010-02-01 00:00:00 | 131 | 227 -(2 rows) - -CALL refresh_continuous_aggregate('conditions_dist_1m', '2010-01-01 00:00:00 MSK', '2010-04-01 00:00:00 MSK'); -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m -ORDER BY month; - month | min | max ----------------------+------+------ - 2010-01-01 00:00:00 | 101 | 1231 - 2010-02-01 00:00:00 | -999 | 227 - 2010-03-01 00:00:00 | 15 | 15 -(3 rows) - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m_manual -ORDER BY month; - month | min | max ----------------------+-----+------ - 2010-01-01 00:00:00 | 101 | 1231 - 2010-02-01 00:00:00 | 131 | 227 -(2 rows) - -CALL refresh_continuous_aggregate('conditions_dist_1m_manual', '2010-01-01 00:00:00 MSK', '2010-04-01 00:00:00 MSK'); -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m -ORDER BY month; - month | min | max ----------------------+------+------ - 2010-01-01 00:00:00 | 101 | 1231 - 2010-02-01 00:00:00 | -999 | 227 - 2010-03-01 00:00:00 | 15 | 15 -(3 rows) - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m_manual -ORDER BY month; - month | min | max ----------------------+------+------ - 2010-01-01 00:00:00 | 101 | 1231 - 2010-02-01 00:00:00 | -999 | 227 - 2010-03-01 00:00:00 | 15 | 15 -(3 rows) - --- Check 
compatibility with compressed distributed hypertables -ALTER MATERIALIZED VIEW conditions_dist_1m_manual SET ( timescaledb.compress ); -NOTICE: defaulting compress_orderby to bucket -SELECT compress_chunk(ch) -FROM show_chunks('conditions_dist_1m_manual') ch limit 1; - compress_chunk -------------------------------------------- - _timescaledb_internal._hyper_12_201_chunk -(1 row) - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m_manual -ORDER BY month; - month | min | max ----------------------+------+------ - 2010-01-01 00:00:00 | 101 | 1231 - 2010-02-01 00:00:00 | -999 | 227 - 2010-03-01 00:00:00 | 15 | 15 -(3 rows) - --- Clean up -DROP TABLE conditions_dist CASCADE; -NOTICE: drop cascades to 5 other objects -NOTICE: drop cascades to 3 other objects -NOTICE: drop cascades to 3 other objects -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); -DROP DATABASE :DATA_NODE_3 WITH (FORCE); -- Make sure add_continuous_aggregate_policy() works CREATE TABLE conditions_policy( day TIMESTAMPTZ NOT NULL, @@ -857,7 +661,7 @@ SELECT create_hypertable( ); create_hypertable --------------------------------- - (14,public,conditions_policy,t) + (10,public,conditions_policy,t) (1 row) INSERT INTO conditions_policy (day, city, temperature) VALUES diff --git a/tsl/test/expected/partialize_finalize.out b/tsl/test/expected/partialize_finalize.out index 0a438d491f5..ce579d903c2 100644 --- a/tsl/test/expected/partialize_finalize.out +++ b/tsl/test/expected/partialize_finalize.out @@ -196,920 +196,4 @@ TRUNCATE TABLE t1; \COPY t1 FROM data/partialize_finalize_data.csv WITH CSV HEADER --repeat query to verify partial serialization sanitization works for versions PG >= 14 CREATE TABLE vfinal_dump_res AS SELECT * FROM vfinal; --- compare results to verify there is no difference -(SELECT * FROM vfinal_res) EXCEPT (SELECT * FROM vfinal_dump_res); - a | sumb | minc | maxd | stddevb | stddeve ----+------+------+------+---------+--------- -(0 rows) - ---with having clause -- -select a, b , _timescaledb_functions.finalize_agg( 'min(text)', 'pg_catalog', 'default', null, partialc, null::text ) minc, _timescaledb_functions.finalize_agg( 'max(timestamp with time zone)', null, null, null, partiald, null::timestamptz ) maxd from t1 where b is not null group by a, b having _timescaledb_functions.finalize_agg( 'max(timestamp with time zone)', null, null, null, partiald, null::timestamptz ) is not null order by a, b; - a | b | minc | maxd -----+----+-------+------------------------------ - 1 | 10 | hello | Fri Jan 01 09:00:00 2010 PST - 1 | 20 | abc | Sat Jan 02 09:00:00 2010 PST - 1 | 30 | abcd | Sun Jan 03 09:00:00 2010 PST - 1 | 50 | | Fri Jan 01 09:00:00 2010 PST - 2 | 10 | hello | Fri Jan 01 09:00:00 2010 PST - 2 | 20 | hello | Fri Jan 01 09:00:00 2010 PST - 2 | 30 | hello | Fri Jan 01 09:00:00 2010 PST - 12 | 10 | hello | Sat Jan 02 06:00:00 2010 PST -(8 rows) - ---TEST5 test with TOAST data -drop view vfinal; -drop table t1; -drop view v1; -drop table foo; -create table foo( a integer, b timestamptz, toastval TEXT); --- Set storage type to EXTERNAL to prevent PostgreSQL from compressing my --- easily compressable string and instead store it with TOAST -ALTER TABLE foo ALTER COLUMN toastval SET STORAGE EXTERNAL; -SELECT count(*) FROM create_hypertable('foo', 'b'); -NOTICE: adding not-null constraint to column "b" - count -------- - 1 -(1 row) - -INSERT INTO foo VALUES( 1, '2004-10-19 
10:23:54', repeat('this must be over 2k. ', 1100)); -INSERT INTO foo VALUES(1, '2005-10-19 10:23:54', repeat('I am a tall big giraffe in the zoo. ', 1100)); -INSERT INTO foo values( 1, '2005-01-01 00:00:00+00', NULL); -INSERT INTO foo values( 2, '2005-01-01 00:00:00+00', NULL); -create or replace view v1(a, partialb, partialtv) as select a, _timescaledb_functions.partialize_agg( max(b) ), _timescaledb_functions.partialize_agg( min(toastval)) from foo group by a; -EXPLAIN (VERBOSE, COSTS OFF) -create table t1 as select * from v1; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Partial HashAggregate - Output: _hyper_1_1_chunk.a, _timescaledb_functions.partialize_agg(PARTIAL max(_hyper_1_1_chunk.b)), _timescaledb_functions.partialize_agg(PARTIAL min(_hyper_1_1_chunk.toastval)) - Group Key: _hyper_1_1_chunk.a - -> Append - -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.a, _hyper_1_1_chunk.b, _hyper_1_1_chunk.toastval - -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.a, _hyper_1_2_chunk.b, _hyper_1_2_chunk.toastval - -> Seq Scan on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.a, _hyper_1_3_chunk.b, _hyper_1_3_chunk.toastval -(10 rows) - -create table t1 as select * from v1; -insert into t1 select * from v1; -select a, _timescaledb_functions.finalize_agg( 'max(timestamp with time zone)', null, null, null, partialb, null::timestamptz ) maxb, -_timescaledb_functions.finalize_agg( 'min(text)', 'pg_catalog', 'default', null, partialtv, null::text ) = repeat('I am a tall big giraffe in the zoo. ', 1100) mintv_equal -from t1 group by a order by a; - a | maxb | mintv_equal ----+------------------------------+------------- - 1 | Wed Oct 19 10:23:54 2005 PDT | t - 2 | Fri Dec 31 16:00:00 2004 PST | -(2 rows) - ---non top-level partials -with cte as ( - select a, _timescaledb_functions.partialize_agg(min(toastval)) tp from foo group by a -) -select length(tp) from cte; - length --------- - 40700 - -(2 rows) - -select length(_timescaledb_functions.partialize_agg( min(toastval))) from foo group by a; - length --------- - 40700 - -(2 rows) - -select length(_timescaledb_functions.partialize_agg(min(a+1))) from foo; - length --------- - 4 -(1 row) - -\set ON_ERROR_STOP 0 -select length(_timescaledb_functions.partialize_agg(1+min(a))) from foo; -ERROR: the input to partialize must be an aggregate -select length(_timescaledb_functions.partialize_agg(min(a)+min(a))) from foo; -ERROR: the input to partialize must be an aggregate ---non-trivial HAVING clause not allowed with partialize_agg -select time_bucket('1 hour', b) as b, _timescaledb_functions.partialize_agg(avg(a)) -from foo -group by 1 -having avg(a) > 3; -ERROR: cannot partialize aggregate with HAVING clause ---mixing partialized and non-partialized aggs is not allowed -select time_bucket('1 hour', b) as b, _timescaledb_functions.partialize_agg(avg(a)), sum(a) -from foo -group by 1; -ERROR: cannot mix partialized and non-partialized aggregates in the same statement -\set ON_ERROR_STOP 1 ---partializing works with HAVING when the planner can effectively ---reduce it. In this case to a simple filter. 
-select time_bucket('1 hour', b) as b, toastval, _timescaledb_functions.partialize_agg(avg(a)) -from foo -group by b, toastval -having toastval LIKE 'does not exist'; - b | toastval | partialize_agg ----+----------+---------------- -(0 rows) - --- --- TEST FINALIZEFUNC_EXTRA --- --- create special aggregate to test ffunc_extra --- Raise warning with the actual type being passed in -CREATE OR REPLACE FUNCTION fake_ffunc(a int8, b int, x anyelement) -RETURNS anyelement AS $$ -BEGIN - RAISE WARNING 'type %', pg_typeof(x); - RETURN x; -END; -$$ -LANGUAGE plpgsql; -CREATE OR REPLACE FUNCTION fake_sfunc(a int8, b int, x anyelement) -RETURNS int8 AS $$ -BEGIN - RETURN b; -END; $$ -LANGUAGE plpgsql; -CREATE AGGREGATE aggregate_to_test_ffunc_extra(int, anyelement) ( - SFUNC = fake_sfunc, - STYPE = int8, - COMBINEFUNC = int8pl, - FINALFUNC = fake_ffunc, - PARALLEL = SAFE, - FINALFUNC_EXTRA -); -select aggregate_to_test_ffunc_extra(8, 'name'::text); -WARNING: type text - aggregate_to_test_ffunc_extra -------------------------------- - -(1 row) - -\set ON_ERROR_STOP 0 ---errors on wrong input type array -with cte as (SELECT _timescaledb_functions.partialize_agg(aggregate_to_test_ffunc_extra(8, 'name'::text)) as part) -select _timescaledb_functions.finalize_agg( 'aggregate_to_test_ffunc_extra(int, anyelement)', null, null, null, part, null::text) from cte; -ERROR: cannot pass null input_type with FINALFUNC_EXTRA aggregates -with cte as (SELECT _timescaledb_functions.partialize_agg(aggregate_to_test_ffunc_extra(8, 'name'::text)) as part) -select _timescaledb_functions.finalize_agg( 'aggregate_to_test_ffunc_extra(int, anyelement)', null, null, array[array['a'::name, 'b'::name, 'c'::name]], part, null::text) from cte; -ERROR: invalid input type array: expecting slices of size 2 -with cte as (SELECT _timescaledb_functions.partialize_agg(aggregate_to_test_ffunc_extra(8, 'name'::text)) as part) -select _timescaledb_functions.finalize_agg( 'aggregate_to_test_ffunc_extra(int, anyelement)', null, null, array[array[]::name[]]::name[], part, null::text) from cte; -ERROR: invalid input type array: wrong number of dimensions -with cte as (SELECT _timescaledb_functions.partialize_agg(aggregate_to_test_ffunc_extra(8, 'name'::text)) as part) -select _timescaledb_functions.finalize_agg( 'aggregate_to_test_ffunc_extra(int, anyelement)', null, null, array[]::name[], part, null::text) from cte; -ERROR: invalid input type array: wrong number of dimensions -with cte as (SELECT _timescaledb_functions.partialize_agg(aggregate_to_test_ffunc_extra(8, 'name'::text)) as part) -select _timescaledb_functions.finalize_agg( 'aggregate_to_test_ffunc_extra(int, anyelement)', null, null, array[array['public'::name, 'int'::name], array['public', 'text']], part, null::text) from cte; -ERROR: invalid input type: public.int -with cte as (SELECT _timescaledb_functions.partialize_agg(aggregate_to_test_ffunc_extra(8, 'name'::text)) as part) -select _timescaledb_functions.finalize_agg( 'aggregate_to_test_ffunc_extra(int, anyelement)', null, null, array[array['public'::name, 'int4'::name], array['public', 'text']], part, null::text) from cte; -ERROR: invalid input type: public.int4 -with cte as (SELECT _timescaledb_functions.partialize_agg(aggregate_to_test_ffunc_extra(8, 'name'::text)) as part) -select _timescaledb_functions.finalize_agg( 'aggregate_to_test_ffunc_extra(int, anyelement)', null, null, array[array['pg_catalog'::name, 'int4'::name], array['pg_catalog', 'text'], array['pg_catalog', 'text']], part, null::text) from cte; -ERROR: 
invalid number of input types -select _timescaledb_functions.finalize_agg(NULL::text,NULL::name,NULL::name,NULL::_name,NULL::bytea,a) over () from foo; -ERROR: finalize_agg_sfunc called in non-aggregate context -\set ON_ERROR_STOP 1 ---make sure right type in warning and is null returns true -with cte as (SELECT _timescaledb_functions.partialize_agg(aggregate_to_test_ffunc_extra(8, 'name'::text)) as part) -select _timescaledb_functions.finalize_agg( 'aggregate_to_test_ffunc_extra(int, anyelement)', null, null, array[array['pg_catalog'::name, 'int4'::name], array['pg_catalog', 'text']], part, null::text) is null from cte; -WARNING: type text - ?column? ----------- - t -(1 row) - -with cte as (SELECT _timescaledb_functions.partialize_agg(aggregate_to_test_ffunc_extra(8, 1::bigint)) as part) -select _timescaledb_functions.finalize_agg( 'aggregate_to_test_ffunc_extra(int, anyelement)', null, null, array[array['pg_catalog'::name, 'int4'::name], array['pg_catalog', 'int8']], part, null::text) is null from cte; -WARNING: type bigint - ?column? ----------- - t -(1 row) - --- Issue 4922 -CREATE TABLE issue4922 ( - time TIMESTAMPTZ NOT NULL, - value INTEGER -); -SELECT create_hypertable('issue4922', 'time'); - create_hypertable ------------------------- - (2,public,issue4922,t) -(1 row) - --- helper function: integer -> pseudorandom integer [0..100]. -CREATE OR REPLACE FUNCTION mix(x INTEGER) RETURNS INTEGER AS $$ SELECT (((hashint4(x) / (pow(2, 31) - 1) + 1) / 2) * 100)::INTEGER $$ LANGUAGE SQL; -INSERT INTO issue4922 (time, value) -SELECT '2022-01-01 00:00:00-03'::timestamptz + interval '1 year' * mix(x), mix(x) -FROM generate_series(1, 100000) x(x); -SELECT set_config(CASE WHEN current_setting('server_version_num')::int < 160000 THEN 'force_parallel_mode' ELSE 'debug_parallel_query' END,'on', false); - set_config ------------- - on -(1 row) - -SET parallel_setup_cost = 0; --- Materialize partials from execution of parallel query plan -EXPLAIN (VERBOSE, COSTS OFF) - SELECT - _timescaledb_functions.partialize_agg(sum(value)) AS partial_sum, - _timescaledb_functions.partialize_agg(avg(value)) AS partial_avg, - _timescaledb_functions.partialize_agg(min(value)) AS partial_min, - _timescaledb_functions.partialize_agg(max(value)) AS partial_max, - _timescaledb_functions.partialize_agg(count(*)) AS partial_count - FROM public.issue4922; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Partial Aggregate - Output: _timescaledb_functions.partialize_agg(PARTIAL sum(_hyper_2_4_chunk.value)), _timescaledb_functions.partialize_agg(PARTIAL avg(_hyper_2_4_chunk.value)), _timescaledb_functions.partialize_agg(PARTIAL min(_hyper_2_4_chunk.value)), _timescaledb_functions.partialize_agg(PARTIAL max(_hyper_2_4_chunk.value)), _timescaledb_functions.partialize_agg(PARTIAL count(*)) - -> Gather - Output: (PARTIAL sum(_hyper_2_4_chunk.value)), (PARTIAL avg(_hyper_2_4_chunk.value)), (PARTIAL min(_hyper_2_4_chunk.value)), (PARTIAL max(_hyper_2_4_chunk.value)), (PARTIAL count(*)) - Workers Planned: 2 - -> Parallel Append - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_4_chunk.value), PARTIAL avg(_hyper_2_4_chunk.value), PARTIAL min(_hyper_2_4_chunk.value), PARTIAL 
max(_hyper_2_4_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_4_chunk - Output: _hyper_2_4_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_5_chunk.value), PARTIAL avg(_hyper_2_5_chunk.value), PARTIAL min(_hyper_2_5_chunk.value), PARTIAL max(_hyper_2_5_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_5_chunk - Output: _hyper_2_5_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_6_chunk.value), PARTIAL avg(_hyper_2_6_chunk.value), PARTIAL min(_hyper_2_6_chunk.value), PARTIAL max(_hyper_2_6_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_6_chunk - Output: _hyper_2_6_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_7_chunk.value), PARTIAL avg(_hyper_2_7_chunk.value), PARTIAL min(_hyper_2_7_chunk.value), PARTIAL max(_hyper_2_7_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_7_chunk - Output: _hyper_2_7_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_8_chunk.value), PARTIAL avg(_hyper_2_8_chunk.value), PARTIAL min(_hyper_2_8_chunk.value), PARTIAL max(_hyper_2_8_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_8_chunk - Output: _hyper_2_8_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_9_chunk.value), PARTIAL avg(_hyper_2_9_chunk.value), PARTIAL min(_hyper_2_9_chunk.value), PARTIAL max(_hyper_2_9_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_9_chunk - Output: _hyper_2_9_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_10_chunk.value), PARTIAL avg(_hyper_2_10_chunk.value), PARTIAL min(_hyper_2_10_chunk.value), PARTIAL max(_hyper_2_10_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_10_chunk - Output: _hyper_2_10_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_11_chunk.value), PARTIAL avg(_hyper_2_11_chunk.value), PARTIAL min(_hyper_2_11_chunk.value), PARTIAL max(_hyper_2_11_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_11_chunk - Output: _hyper_2_11_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_12_chunk.value), PARTIAL avg(_hyper_2_12_chunk.value), PARTIAL min(_hyper_2_12_chunk.value), PARTIAL max(_hyper_2_12_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_12_chunk - Output: _hyper_2_12_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_13_chunk.value), PARTIAL avg(_hyper_2_13_chunk.value), PARTIAL min(_hyper_2_13_chunk.value), PARTIAL max(_hyper_2_13_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_13_chunk - Output: _hyper_2_13_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_14_chunk.value), PARTIAL avg(_hyper_2_14_chunk.value), PARTIAL min(_hyper_2_14_chunk.value), PARTIAL max(_hyper_2_14_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_14_chunk - Output: _hyper_2_14_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_15_chunk.value), PARTIAL avg(_hyper_2_15_chunk.value), PARTIAL min(_hyper_2_15_chunk.value), PARTIAL max(_hyper_2_15_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_15_chunk - Output: _hyper_2_15_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_16_chunk.value), PARTIAL avg(_hyper_2_16_chunk.value), PARTIAL 
min(_hyper_2_16_chunk.value), PARTIAL max(_hyper_2_16_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_16_chunk - Output: _hyper_2_16_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_17_chunk.value), PARTIAL avg(_hyper_2_17_chunk.value), PARTIAL min(_hyper_2_17_chunk.value), PARTIAL max(_hyper_2_17_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_17_chunk - Output: _hyper_2_17_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_18_chunk.value), PARTIAL avg(_hyper_2_18_chunk.value), PARTIAL min(_hyper_2_18_chunk.value), PARTIAL max(_hyper_2_18_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_18_chunk - Output: _hyper_2_18_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_19_chunk.value), PARTIAL avg(_hyper_2_19_chunk.value), PARTIAL min(_hyper_2_19_chunk.value), PARTIAL max(_hyper_2_19_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_19_chunk - Output: _hyper_2_19_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_20_chunk.value), PARTIAL avg(_hyper_2_20_chunk.value), PARTIAL min(_hyper_2_20_chunk.value), PARTIAL max(_hyper_2_20_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_20_chunk - Output: _hyper_2_20_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_21_chunk.value), PARTIAL avg(_hyper_2_21_chunk.value), PARTIAL min(_hyper_2_21_chunk.value), PARTIAL max(_hyper_2_21_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_21_chunk - Output: _hyper_2_21_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_22_chunk.value), PARTIAL avg(_hyper_2_22_chunk.value), PARTIAL min(_hyper_2_22_chunk.value), PARTIAL max(_hyper_2_22_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_22_chunk - Output: _hyper_2_22_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_23_chunk.value), PARTIAL avg(_hyper_2_23_chunk.value), PARTIAL min(_hyper_2_23_chunk.value), PARTIAL max(_hyper_2_23_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_23_chunk - Output: _hyper_2_23_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_24_chunk.value), PARTIAL avg(_hyper_2_24_chunk.value), PARTIAL min(_hyper_2_24_chunk.value), PARTIAL max(_hyper_2_24_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_24_chunk - Output: _hyper_2_24_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_25_chunk.value), PARTIAL avg(_hyper_2_25_chunk.value), PARTIAL min(_hyper_2_25_chunk.value), PARTIAL max(_hyper_2_25_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_25_chunk - Output: _hyper_2_25_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_26_chunk.value), PARTIAL avg(_hyper_2_26_chunk.value), PARTIAL min(_hyper_2_26_chunk.value), PARTIAL max(_hyper_2_26_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_26_chunk - Output: _hyper_2_26_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_27_chunk.value), PARTIAL avg(_hyper_2_27_chunk.value), PARTIAL min(_hyper_2_27_chunk.value), PARTIAL max(_hyper_2_27_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_27_chunk - Output: _hyper_2_27_chunk.value - -> Partial Aggregate - Output: PARTIAL 
sum(_hyper_2_28_chunk.value), PARTIAL avg(_hyper_2_28_chunk.value), PARTIAL min(_hyper_2_28_chunk.value), PARTIAL max(_hyper_2_28_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_28_chunk - Output: _hyper_2_28_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_29_chunk.value), PARTIAL avg(_hyper_2_29_chunk.value), PARTIAL min(_hyper_2_29_chunk.value), PARTIAL max(_hyper_2_29_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_29_chunk - Output: _hyper_2_29_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_30_chunk.value), PARTIAL avg(_hyper_2_30_chunk.value), PARTIAL min(_hyper_2_30_chunk.value), PARTIAL max(_hyper_2_30_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_30_chunk - Output: _hyper_2_30_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_31_chunk.value), PARTIAL avg(_hyper_2_31_chunk.value), PARTIAL min(_hyper_2_31_chunk.value), PARTIAL max(_hyper_2_31_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_31_chunk - Output: _hyper_2_31_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_32_chunk.value), PARTIAL avg(_hyper_2_32_chunk.value), PARTIAL min(_hyper_2_32_chunk.value), PARTIAL max(_hyper_2_32_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_32_chunk - Output: _hyper_2_32_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_33_chunk.value), PARTIAL avg(_hyper_2_33_chunk.value), PARTIAL min(_hyper_2_33_chunk.value), PARTIAL max(_hyper_2_33_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_33_chunk - Output: _hyper_2_33_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_34_chunk.value), PARTIAL avg(_hyper_2_34_chunk.value), PARTIAL min(_hyper_2_34_chunk.value), PARTIAL max(_hyper_2_34_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_34_chunk - Output: _hyper_2_34_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_35_chunk.value), PARTIAL avg(_hyper_2_35_chunk.value), PARTIAL min(_hyper_2_35_chunk.value), PARTIAL max(_hyper_2_35_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_35_chunk - Output: _hyper_2_35_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_36_chunk.value), PARTIAL avg(_hyper_2_36_chunk.value), PARTIAL min(_hyper_2_36_chunk.value), PARTIAL max(_hyper_2_36_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_36_chunk - Output: _hyper_2_36_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_37_chunk.value), PARTIAL avg(_hyper_2_37_chunk.value), PARTIAL min(_hyper_2_37_chunk.value), PARTIAL max(_hyper_2_37_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_37_chunk - Output: _hyper_2_37_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_38_chunk.value), PARTIAL avg(_hyper_2_38_chunk.value), PARTIAL min(_hyper_2_38_chunk.value), PARTIAL max(_hyper_2_38_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_38_chunk - Output: _hyper_2_38_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_39_chunk.value), PARTIAL avg(_hyper_2_39_chunk.value), PARTIAL min(_hyper_2_39_chunk.value), PARTIAL max(_hyper_2_39_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_39_chunk - 
Output: _hyper_2_39_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_40_chunk.value), PARTIAL avg(_hyper_2_40_chunk.value), PARTIAL min(_hyper_2_40_chunk.value), PARTIAL max(_hyper_2_40_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_40_chunk - Output: _hyper_2_40_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_41_chunk.value), PARTIAL avg(_hyper_2_41_chunk.value), PARTIAL min(_hyper_2_41_chunk.value), PARTIAL max(_hyper_2_41_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_41_chunk - Output: _hyper_2_41_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_42_chunk.value), PARTIAL avg(_hyper_2_42_chunk.value), PARTIAL min(_hyper_2_42_chunk.value), PARTIAL max(_hyper_2_42_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_42_chunk - Output: _hyper_2_42_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_43_chunk.value), PARTIAL avg(_hyper_2_43_chunk.value), PARTIAL min(_hyper_2_43_chunk.value), PARTIAL max(_hyper_2_43_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_43_chunk - Output: _hyper_2_43_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_44_chunk.value), PARTIAL avg(_hyper_2_44_chunk.value), PARTIAL min(_hyper_2_44_chunk.value), PARTIAL max(_hyper_2_44_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_44_chunk - Output: _hyper_2_44_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_45_chunk.value), PARTIAL avg(_hyper_2_45_chunk.value), PARTIAL min(_hyper_2_45_chunk.value), PARTIAL max(_hyper_2_45_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_45_chunk - Output: _hyper_2_45_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_46_chunk.value), PARTIAL avg(_hyper_2_46_chunk.value), PARTIAL min(_hyper_2_46_chunk.value), PARTIAL max(_hyper_2_46_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_46_chunk - Output: _hyper_2_46_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_47_chunk.value), PARTIAL avg(_hyper_2_47_chunk.value), PARTIAL min(_hyper_2_47_chunk.value), PARTIAL max(_hyper_2_47_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_47_chunk - Output: _hyper_2_47_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_48_chunk.value), PARTIAL avg(_hyper_2_48_chunk.value), PARTIAL min(_hyper_2_48_chunk.value), PARTIAL max(_hyper_2_48_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_48_chunk - Output: _hyper_2_48_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_49_chunk.value), PARTIAL avg(_hyper_2_49_chunk.value), PARTIAL min(_hyper_2_49_chunk.value), PARTIAL max(_hyper_2_49_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_49_chunk - Output: _hyper_2_49_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_50_chunk.value), PARTIAL avg(_hyper_2_50_chunk.value), PARTIAL min(_hyper_2_50_chunk.value), PARTIAL max(_hyper_2_50_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_50_chunk - Output: _hyper_2_50_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_51_chunk.value), PARTIAL avg(_hyper_2_51_chunk.value), PARTIAL min(_hyper_2_51_chunk.value), PARTIAL max(_hyper_2_51_chunk.value), PARTIAL 
count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_51_chunk - Output: _hyper_2_51_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_52_chunk.value), PARTIAL avg(_hyper_2_52_chunk.value), PARTIAL min(_hyper_2_52_chunk.value), PARTIAL max(_hyper_2_52_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_52_chunk - Output: _hyper_2_52_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_53_chunk.value), PARTIAL avg(_hyper_2_53_chunk.value), PARTIAL min(_hyper_2_53_chunk.value), PARTIAL max(_hyper_2_53_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_53_chunk - Output: _hyper_2_53_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_54_chunk.value), PARTIAL avg(_hyper_2_54_chunk.value), PARTIAL min(_hyper_2_54_chunk.value), PARTIAL max(_hyper_2_54_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_54_chunk - Output: _hyper_2_54_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_55_chunk.value), PARTIAL avg(_hyper_2_55_chunk.value), PARTIAL min(_hyper_2_55_chunk.value), PARTIAL max(_hyper_2_55_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_55_chunk - Output: _hyper_2_55_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_56_chunk.value), PARTIAL avg(_hyper_2_56_chunk.value), PARTIAL min(_hyper_2_56_chunk.value), PARTIAL max(_hyper_2_56_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_56_chunk - Output: _hyper_2_56_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_57_chunk.value), PARTIAL avg(_hyper_2_57_chunk.value), PARTIAL min(_hyper_2_57_chunk.value), PARTIAL max(_hyper_2_57_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_57_chunk - Output: _hyper_2_57_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_58_chunk.value), PARTIAL avg(_hyper_2_58_chunk.value), PARTIAL min(_hyper_2_58_chunk.value), PARTIAL max(_hyper_2_58_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_58_chunk - Output: _hyper_2_58_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_59_chunk.value), PARTIAL avg(_hyper_2_59_chunk.value), PARTIAL min(_hyper_2_59_chunk.value), PARTIAL max(_hyper_2_59_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_59_chunk - Output: _hyper_2_59_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_60_chunk.value), PARTIAL avg(_hyper_2_60_chunk.value), PARTIAL min(_hyper_2_60_chunk.value), PARTIAL max(_hyper_2_60_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_60_chunk - Output: _hyper_2_60_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_61_chunk.value), PARTIAL avg(_hyper_2_61_chunk.value), PARTIAL min(_hyper_2_61_chunk.value), PARTIAL max(_hyper_2_61_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_61_chunk - Output: _hyper_2_61_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_62_chunk.value), PARTIAL avg(_hyper_2_62_chunk.value), PARTIAL min(_hyper_2_62_chunk.value), PARTIAL max(_hyper_2_62_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_62_chunk - Output: _hyper_2_62_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_63_chunk.value), PARTIAL avg(_hyper_2_63_chunk.value), PARTIAL 
min(_hyper_2_63_chunk.value), PARTIAL max(_hyper_2_63_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_63_chunk - Output: _hyper_2_63_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_64_chunk.value), PARTIAL avg(_hyper_2_64_chunk.value), PARTIAL min(_hyper_2_64_chunk.value), PARTIAL max(_hyper_2_64_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_64_chunk - Output: _hyper_2_64_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_65_chunk.value), PARTIAL avg(_hyper_2_65_chunk.value), PARTIAL min(_hyper_2_65_chunk.value), PARTIAL max(_hyper_2_65_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_65_chunk - Output: _hyper_2_65_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_66_chunk.value), PARTIAL avg(_hyper_2_66_chunk.value), PARTIAL min(_hyper_2_66_chunk.value), PARTIAL max(_hyper_2_66_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_66_chunk - Output: _hyper_2_66_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_67_chunk.value), PARTIAL avg(_hyper_2_67_chunk.value), PARTIAL min(_hyper_2_67_chunk.value), PARTIAL max(_hyper_2_67_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_67_chunk - Output: _hyper_2_67_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_68_chunk.value), PARTIAL avg(_hyper_2_68_chunk.value), PARTIAL min(_hyper_2_68_chunk.value), PARTIAL max(_hyper_2_68_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_68_chunk - Output: _hyper_2_68_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_69_chunk.value), PARTIAL avg(_hyper_2_69_chunk.value), PARTIAL min(_hyper_2_69_chunk.value), PARTIAL max(_hyper_2_69_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_69_chunk - Output: _hyper_2_69_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_70_chunk.value), PARTIAL avg(_hyper_2_70_chunk.value), PARTIAL min(_hyper_2_70_chunk.value), PARTIAL max(_hyper_2_70_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_70_chunk - Output: _hyper_2_70_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_71_chunk.value), PARTIAL avg(_hyper_2_71_chunk.value), PARTIAL min(_hyper_2_71_chunk.value), PARTIAL max(_hyper_2_71_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_71_chunk - Output: _hyper_2_71_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_72_chunk.value), PARTIAL avg(_hyper_2_72_chunk.value), PARTIAL min(_hyper_2_72_chunk.value), PARTIAL max(_hyper_2_72_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_72_chunk - Output: _hyper_2_72_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_73_chunk.value), PARTIAL avg(_hyper_2_73_chunk.value), PARTIAL min(_hyper_2_73_chunk.value), PARTIAL max(_hyper_2_73_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_73_chunk - Output: _hyper_2_73_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_74_chunk.value), PARTIAL avg(_hyper_2_74_chunk.value), PARTIAL min(_hyper_2_74_chunk.value), PARTIAL max(_hyper_2_74_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_74_chunk - Output: _hyper_2_74_chunk.value - -> Partial Aggregate - Output: PARTIAL 
sum(_hyper_2_75_chunk.value), PARTIAL avg(_hyper_2_75_chunk.value), PARTIAL min(_hyper_2_75_chunk.value), PARTIAL max(_hyper_2_75_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_75_chunk - Output: _hyper_2_75_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_76_chunk.value), PARTIAL avg(_hyper_2_76_chunk.value), PARTIAL min(_hyper_2_76_chunk.value), PARTIAL max(_hyper_2_76_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_76_chunk - Output: _hyper_2_76_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_77_chunk.value), PARTIAL avg(_hyper_2_77_chunk.value), PARTIAL min(_hyper_2_77_chunk.value), PARTIAL max(_hyper_2_77_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_77_chunk - Output: _hyper_2_77_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_78_chunk.value), PARTIAL avg(_hyper_2_78_chunk.value), PARTIAL min(_hyper_2_78_chunk.value), PARTIAL max(_hyper_2_78_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_78_chunk - Output: _hyper_2_78_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_79_chunk.value), PARTIAL avg(_hyper_2_79_chunk.value), PARTIAL min(_hyper_2_79_chunk.value), PARTIAL max(_hyper_2_79_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_79_chunk - Output: _hyper_2_79_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_80_chunk.value), PARTIAL avg(_hyper_2_80_chunk.value), PARTIAL min(_hyper_2_80_chunk.value), PARTIAL max(_hyper_2_80_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_80_chunk - Output: _hyper_2_80_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_81_chunk.value), PARTIAL avg(_hyper_2_81_chunk.value), PARTIAL min(_hyper_2_81_chunk.value), PARTIAL max(_hyper_2_81_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_81_chunk - Output: _hyper_2_81_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_82_chunk.value), PARTIAL avg(_hyper_2_82_chunk.value), PARTIAL min(_hyper_2_82_chunk.value), PARTIAL max(_hyper_2_82_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_82_chunk - Output: _hyper_2_82_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_83_chunk.value), PARTIAL avg(_hyper_2_83_chunk.value), PARTIAL min(_hyper_2_83_chunk.value), PARTIAL max(_hyper_2_83_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_83_chunk - Output: _hyper_2_83_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_84_chunk.value), PARTIAL avg(_hyper_2_84_chunk.value), PARTIAL min(_hyper_2_84_chunk.value), PARTIAL max(_hyper_2_84_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_84_chunk - Output: _hyper_2_84_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_85_chunk.value), PARTIAL avg(_hyper_2_85_chunk.value), PARTIAL min(_hyper_2_85_chunk.value), PARTIAL max(_hyper_2_85_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_85_chunk - Output: _hyper_2_85_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_86_chunk.value), PARTIAL avg(_hyper_2_86_chunk.value), PARTIAL min(_hyper_2_86_chunk.value), PARTIAL max(_hyper_2_86_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_86_chunk - 
Output: _hyper_2_86_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_87_chunk.value), PARTIAL avg(_hyper_2_87_chunk.value), PARTIAL min(_hyper_2_87_chunk.value), PARTIAL max(_hyper_2_87_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_87_chunk - Output: _hyper_2_87_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_88_chunk.value), PARTIAL avg(_hyper_2_88_chunk.value), PARTIAL min(_hyper_2_88_chunk.value), PARTIAL max(_hyper_2_88_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_88_chunk - Output: _hyper_2_88_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_89_chunk.value), PARTIAL avg(_hyper_2_89_chunk.value), PARTIAL min(_hyper_2_89_chunk.value), PARTIAL max(_hyper_2_89_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_89_chunk - Output: _hyper_2_89_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_90_chunk.value), PARTIAL avg(_hyper_2_90_chunk.value), PARTIAL min(_hyper_2_90_chunk.value), PARTIAL max(_hyper_2_90_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_90_chunk - Output: _hyper_2_90_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_91_chunk.value), PARTIAL avg(_hyper_2_91_chunk.value), PARTIAL min(_hyper_2_91_chunk.value), PARTIAL max(_hyper_2_91_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_91_chunk - Output: _hyper_2_91_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_92_chunk.value), PARTIAL avg(_hyper_2_92_chunk.value), PARTIAL min(_hyper_2_92_chunk.value), PARTIAL max(_hyper_2_92_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_92_chunk - Output: _hyper_2_92_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_93_chunk.value), PARTIAL avg(_hyper_2_93_chunk.value), PARTIAL min(_hyper_2_93_chunk.value), PARTIAL max(_hyper_2_93_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_93_chunk - Output: _hyper_2_93_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_94_chunk.value), PARTIAL avg(_hyper_2_94_chunk.value), PARTIAL min(_hyper_2_94_chunk.value), PARTIAL max(_hyper_2_94_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_94_chunk - Output: _hyper_2_94_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_95_chunk.value), PARTIAL avg(_hyper_2_95_chunk.value), PARTIAL min(_hyper_2_95_chunk.value), PARTIAL max(_hyper_2_95_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_95_chunk - Output: _hyper_2_95_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_96_chunk.value), PARTIAL avg(_hyper_2_96_chunk.value), PARTIAL min(_hyper_2_96_chunk.value), PARTIAL max(_hyper_2_96_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_96_chunk - Output: _hyper_2_96_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_97_chunk.value), PARTIAL avg(_hyper_2_97_chunk.value), PARTIAL min(_hyper_2_97_chunk.value), PARTIAL max(_hyper_2_97_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_97_chunk - Output: _hyper_2_97_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_98_chunk.value), PARTIAL avg(_hyper_2_98_chunk.value), PARTIAL min(_hyper_2_98_chunk.value), PARTIAL max(_hyper_2_98_chunk.value), PARTIAL 
count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_98_chunk - Output: _hyper_2_98_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_99_chunk.value), PARTIAL avg(_hyper_2_99_chunk.value), PARTIAL min(_hyper_2_99_chunk.value), PARTIAL max(_hyper_2_99_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_99_chunk - Output: _hyper_2_99_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_100_chunk.value), PARTIAL avg(_hyper_2_100_chunk.value), PARTIAL min(_hyper_2_100_chunk.value), PARTIAL max(_hyper_2_100_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_100_chunk - Output: _hyper_2_100_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_101_chunk.value), PARTIAL avg(_hyper_2_101_chunk.value), PARTIAL min(_hyper_2_101_chunk.value), PARTIAL max(_hyper_2_101_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_101_chunk - Output: _hyper_2_101_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_102_chunk.value), PARTIAL avg(_hyper_2_102_chunk.value), PARTIAL min(_hyper_2_102_chunk.value), PARTIAL max(_hyper_2_102_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_102_chunk - Output: _hyper_2_102_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_103_chunk.value), PARTIAL avg(_hyper_2_103_chunk.value), PARTIAL min(_hyper_2_103_chunk.value), PARTIAL max(_hyper_2_103_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_103_chunk - Output: _hyper_2_103_chunk.value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_2_104_chunk.value), PARTIAL avg(_hyper_2_104_chunk.value), PARTIAL min(_hyper_2_104_chunk.value), PARTIAL max(_hyper_2_104_chunk.value), PARTIAL count(*) - -> Parallel Seq Scan on _timescaledb_internal._hyper_2_104_chunk - Output: _hyper_2_104_chunk.value -(410 rows) - -CREATE MATERIALIZED VIEW issue4922_partials_parallel AS - SELECT - _timescaledb_functions.partialize_agg(sum(value)) AS partial_sum, - _timescaledb_functions.partialize_agg(avg(value)) AS partial_avg, - _timescaledb_functions.partialize_agg(min(value)) AS partial_min, - _timescaledb_functions.partialize_agg(max(value)) AS partial_max, - _timescaledb_functions.partialize_agg(count(*)) AS partial_count - FROM public.issue4922; --- Materialize partials from execution of non-parallel query plan -SET max_parallel_workers_per_gather = 0; -EXPLAIN (VERBOSE, COSTS OFF) - SELECT - _timescaledb_functions.partialize_agg(sum(value)) AS partial_sum, - _timescaledb_functions.partialize_agg(avg(value)) AS partial_avg, - _timescaledb_functions.partialize_agg(min(value)) AS partial_min, - _timescaledb_functions.partialize_agg(max(value)) AS partial_max, - _timescaledb_functions.partialize_agg(count(*)) AS partial_count - FROM public.issue4922; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - Partial Aggregate - Output: _timescaledb_functions.partialize_agg(PARTIAL sum(_hyper_2_4_chunk.value)), _timescaledb_functions.partialize_agg(PARTIAL avg(_hyper_2_4_chunk.value)), _timescaledb_functions.partialize_agg(PARTIAL min(_hyper_2_4_chunk.value)), 
_timescaledb_functions.partialize_agg(PARTIAL max(_hyper_2_4_chunk.value)), _timescaledb_functions.partialize_agg(PARTIAL count(*)) - -> Append - -> Seq Scan on _timescaledb_internal._hyper_2_4_chunk - Output: _hyper_2_4_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_5_chunk - Output: _hyper_2_5_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_6_chunk - Output: _hyper_2_6_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_7_chunk - Output: _hyper_2_7_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_8_chunk - Output: _hyper_2_8_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_9_chunk - Output: _hyper_2_9_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_10_chunk - Output: _hyper_2_10_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_11_chunk - Output: _hyper_2_11_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_12_chunk - Output: _hyper_2_12_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_13_chunk - Output: _hyper_2_13_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_14_chunk - Output: _hyper_2_14_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_15_chunk - Output: _hyper_2_15_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_16_chunk - Output: _hyper_2_16_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_17_chunk - Output: _hyper_2_17_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_18_chunk - Output: _hyper_2_18_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_19_chunk - Output: _hyper_2_19_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_20_chunk - Output: _hyper_2_20_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_21_chunk - Output: _hyper_2_21_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_22_chunk - Output: _hyper_2_22_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_23_chunk - Output: _hyper_2_23_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_24_chunk - Output: _hyper_2_24_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_25_chunk - Output: _hyper_2_25_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_26_chunk - Output: _hyper_2_26_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_27_chunk - Output: _hyper_2_27_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_28_chunk - Output: _hyper_2_28_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_29_chunk - Output: _hyper_2_29_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_30_chunk - Output: _hyper_2_30_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_31_chunk - Output: _hyper_2_31_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_32_chunk - Output: _hyper_2_32_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_33_chunk - Output: _hyper_2_33_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_34_chunk - Output: _hyper_2_34_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_35_chunk - Output: _hyper_2_35_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_36_chunk - Output: _hyper_2_36_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_37_chunk - Output: _hyper_2_37_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_38_chunk - Output: _hyper_2_38_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_39_chunk - Output: _hyper_2_39_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_40_chunk - Output: _hyper_2_40_chunk.value - -> Seq Scan on 
_timescaledb_internal._hyper_2_41_chunk - Output: _hyper_2_41_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_42_chunk - Output: _hyper_2_42_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_43_chunk - Output: _hyper_2_43_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_44_chunk - Output: _hyper_2_44_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_45_chunk - Output: _hyper_2_45_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_46_chunk - Output: _hyper_2_46_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_47_chunk - Output: _hyper_2_47_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_48_chunk - Output: _hyper_2_48_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_49_chunk - Output: _hyper_2_49_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_50_chunk - Output: _hyper_2_50_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_51_chunk - Output: _hyper_2_51_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_52_chunk - Output: _hyper_2_52_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_53_chunk - Output: _hyper_2_53_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_54_chunk - Output: _hyper_2_54_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_55_chunk - Output: _hyper_2_55_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_56_chunk - Output: _hyper_2_56_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_57_chunk - Output: _hyper_2_57_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_58_chunk - Output: _hyper_2_58_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_59_chunk - Output: _hyper_2_59_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_60_chunk - Output: _hyper_2_60_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_61_chunk - Output: _hyper_2_61_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_62_chunk - Output: _hyper_2_62_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_63_chunk - Output: _hyper_2_63_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_64_chunk - Output: _hyper_2_64_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_65_chunk - Output: _hyper_2_65_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_66_chunk - Output: _hyper_2_66_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_67_chunk - Output: _hyper_2_67_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_68_chunk - Output: _hyper_2_68_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_69_chunk - Output: _hyper_2_69_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_70_chunk - Output: _hyper_2_70_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_71_chunk - Output: _hyper_2_71_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_72_chunk - Output: _hyper_2_72_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_73_chunk - Output: _hyper_2_73_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_74_chunk - Output: _hyper_2_74_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_75_chunk - Output: _hyper_2_75_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_76_chunk - Output: _hyper_2_76_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_77_chunk - Output: _hyper_2_77_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_78_chunk - Output: _hyper_2_78_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_79_chunk - Output: _hyper_2_79_chunk.value - -> Seq Scan on 
_timescaledb_internal._hyper_2_80_chunk - Output: _hyper_2_80_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_81_chunk - Output: _hyper_2_81_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_82_chunk - Output: _hyper_2_82_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_83_chunk - Output: _hyper_2_83_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_84_chunk - Output: _hyper_2_84_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_85_chunk - Output: _hyper_2_85_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_86_chunk - Output: _hyper_2_86_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_87_chunk - Output: _hyper_2_87_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_88_chunk - Output: _hyper_2_88_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_89_chunk - Output: _hyper_2_89_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_90_chunk - Output: _hyper_2_90_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_91_chunk - Output: _hyper_2_91_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_92_chunk - Output: _hyper_2_92_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_93_chunk - Output: _hyper_2_93_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_94_chunk - Output: _hyper_2_94_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_95_chunk - Output: _hyper_2_95_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_96_chunk - Output: _hyper_2_96_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_97_chunk - Output: _hyper_2_97_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_98_chunk - Output: _hyper_2_98_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_99_chunk - Output: _hyper_2_99_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_100_chunk - Output: _hyper_2_100_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_101_chunk - Output: _hyper_2_101_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_102_chunk - Output: _hyper_2_102_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_103_chunk - Output: _hyper_2_103_chunk.value - -> Seq Scan on _timescaledb_internal._hyper_2_104_chunk - Output: _hyper_2_104_chunk.value -(205 rows) - -CREATE MATERIALIZED VIEW issue4922_partials_non_parallel AS - SELECT - _timescaledb_functions.partialize_agg(sum(value)) AS partial_sum, - _timescaledb_functions.partialize_agg(avg(value)) AS partial_avg, - _timescaledb_functions.partialize_agg(min(value)) AS partial_min, - _timescaledb_functions.partialize_agg(max(value)) AS partial_max, - _timescaledb_functions.partialize_agg(count(*)) AS partial_count - FROM public.issue4922; -RESET max_parallel_workers_per_gather; --- partials should be the same in both parallel and non-parallel execution -SELECT * FROM issue4922_partials_parallel; - partial_sum | partial_avg | partial_min | partial_max | partial_count ---------------------+--------------------------------------------------------------------------------------------+-------------+-------------+-------------------- - \x00000000004c4fa9 | \x00000001000000000000001400000002000000010000000800000000000186a00000000800000000004c4fa9 | \x00000000 | \x00000064 | \x00000000000186a0 -(1 row) - -SELECT * FROM issue4922_partials_non_parallel; - partial_sum | partial_avg | partial_min | partial_max | partial_count 
---------------------+--------------------------------------------------------------------------------------------+-------------+-------------+-------------------- - \x00000000004c4fa9 | \x00000001000000000000001400000002000000010000000800000000000186a00000000800000000004c4fa9 | \x00000000 | \x00000064 | \x00000000000186a0 -(1 row) - --- Compare results from partial and non-partial query execution -SELECT - sum(value), - avg(value), - min(value), - max(value), - count(*) -FROM issue4922; - sum | avg | min | max | count ----------+---------------------+-----+-----+-------- - 5001129 | 50.0112900000000000 | 0 | 100 | 100000 -(1 row) - --- The results should be the EQUAL TO the previous query -SELECT - _timescaledb_functions.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_sum, NULL::bigint) AS sum, - _timescaledb_functions.finalize_agg('pg_catalog.avg(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_avg, NULL::numeric) AS avg, - _timescaledb_functions.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_min, NULL::integer) AS min, - _timescaledb_functions.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_max, NULL::integer) AS max, - _timescaledb_functions.finalize_agg('pg_catalog.count()'::text, NULL::name, NULL::name, '{}'::name[], partial_count, NULL::bigint) AS count -FROM issue4922_partials_parallel; - sum | avg | min | max | count ----------+---------------------+-----+-----+-------- - 5001129 | 50.0112900000000000 | 0 | 100 | 100000 -(1 row) - +ERROR: insufficient data left in message diff --git a/tsl/test/expected/read_only.out b/tsl/test/expected/read_only.out index ced4a428446..aa74d1540dd 100644 --- a/tsl/test/expected/read_only.out +++ b/tsl/test/expected/read_only.out @@ -5,8 +5,6 @@ -- Following tests checks that API functions which modify data (including catalog) -- properly recognize read-only transaction state -- -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -- create_hypertable() -- CREATE TABLE test_table(time bigint NOT NULL, device int); @@ -96,112 +94,13 @@ ERROR: cannot execute DROP TABLE in a read-only transaction \set ON_ERROR_STOP 1 SET default_transaction_read_only TO off; DROP TABLE test_table; --- data nodes --- -CREATE TABLE disttable(time timestamptz NOT NULL, device int); --- add_data_node() --- -SET default_transaction_read_only TO on; -\set ON_ERROR_STOP 0 -SELECT * FROM add_data_node(:'DATA_NODE_1', host => 'localhost', database => :'DATA_NODE_1'); -ERROR: cannot execute add_data_node() in a read-only transaction -\set ON_ERROR_STOP 1 -SET default_transaction_read_only TO off; -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node(:'DATA_NODE_1', host => 'localhost', database => :'DATA_NODE_1'); - node_name | database | node_created | database_created | extension_created -----------------+----------------+--------------+------------------+------------------- - db_read_only_1 | db_read_only_1 | t | t | t -(1 row) - -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node(:'DATA_NODE_2', host => 'localhost', database => :'DATA_NODE_2'); - node_name | database | node_created | database_created | extension_created -----------------+----------------+--------------+------------------+------------------- - db_read_only_2 | db_read_only_2 | t | t | t -(1 
row) - --- create_distributed_hypertable() --- -SET default_transaction_read_only TO on; -\set ON_ERROR_STOP 0 -SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', data_nodes => ARRAY[:'DATA_NODE_1']); -ERROR: cannot execute create_distributed_hypertable() in a read-only transaction -\set ON_ERROR_STOP 1 -SET default_transaction_read_only TO off; -SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', data_nodes => ARRAY[:'DATA_NODE_1']); -WARNING: only one data node was assigned to the hypertable - hypertable_id | schema_name | table_name | created ----------------+-------------+------------+--------- - 2 | public | disttable | t -(1 row) - --- attach_data_node() --- -SET default_transaction_read_only TO on; -\set ON_ERROR_STOP 0 -SELECT * FROM attach_data_node(:'DATA_NODE_2', 'disttable'); -ERROR: cannot execute attach_data_node() in a read-only transaction -\set ON_ERROR_STOP 1 -SET default_transaction_read_only TO off; -SELECT * FROM attach_data_node(:'DATA_NODE_2', 'disttable'); -NOTICE: the number of partitions in dimension "device" was increased to 2 - hypertable_id | node_hypertable_id | node_name ----------------+--------------------+---------------- - 2 | 1 | db_read_only_2 -(1 row) - --- detach_data_node() --- -SET default_transaction_read_only TO on; -\set ON_ERROR_STOP 0 -SELECT * FROM detach_data_node(:'DATA_NODE_2', 'disttable'); -ERROR: cannot execute detach_data_node() in a read-only transaction -\set ON_ERROR_STOP 1 -SET default_transaction_read_only TO off; -SELECT * FROM detach_data_node(:'DATA_NODE_2', 'disttable'); -NOTICE: the number of partitions in dimension "device" of hypertable "disttable" was decreased to 1 - detach_data_node ------------------- - 1 -(1 row) - --- delete_data_node() --- -SET default_transaction_read_only TO on; -\set ON_ERROR_STOP 0 -SELECT * FROM delete_data_node(:'DATA_NODE_2'); -ERROR: cannot execute delete_data_node() in a read-only transaction -\set ON_ERROR_STOP 1 -SET default_transaction_read_only TO off; -SELECT * FROM delete_data_node(:'DATA_NODE_2'); - delete_data_node ------------------- - t -(1 row) - --- set_replication_factor() --- -SET default_transaction_read_only TO on; -\set ON_ERROR_STOP 0 -SELECT * FROM set_replication_factor('disttable', 2); -ERROR: cannot execute set_replication_factor() in a read-only transaction -\set ON_ERROR_STOP 1 --- drop distributed hypertable --- -\set ON_ERROR_STOP 0 -DROP TABLE disttable; -ERROR: cannot execute DROP TABLE in a read-only transaction -\set ON_ERROR_STOP 1 -SET default_transaction_read_only TO off; -DROP TABLE disttable; -- Test some read-only cases of DDL operations -- CREATE TABLE test_table(time bigint NOT NULL, device int); SELECT * FROM create_hypertable('test_table', 'time', chunk_time_interval => 1000000::bigint); hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- - 3 | public | test_table | t + 2 | public | test_table | t (1 row) INSERT INTO test_table VALUES (0, 1), (1, 1), (2, 2); @@ -262,7 +161,7 @@ CREATE TABLE test_contagg ( SELECT create_hypertable('test_contagg', 'observation_time'); create_hypertable --------------------------- - (4,public,test_contagg,t) + (3,public,test_contagg,t) (1 row) SET default_transaction_read_only TO on; @@ -289,7 +188,7 @@ CREATE TABLE test_table_int(time bigint NOT NULL, device int); SELECt create_hypertable('test_table_int', 'time', chunk_time_interval=>'1'::bigint); create_hypertable ----------------------------- - (5,public,test_table_int,t) 
+ (4,public,test_table_int,t) (1 row) create or replace function dummy_now() returns BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 5::BIGINT'; @@ -306,7 +205,7 @@ SELECT config as comp_job_config FROM _timescaledb_config.bgw_job WHERE id = :comp_job_id \gset SET default_transaction_read_only TO on; CALL _timescaledb_functions.policy_compression(:comp_job_id, :'comp_job_config'); -WARNING: compressing chunk "_timescaledb_internal._hyper_5_2_chunk" failed when compression policy is executed +WARNING: compressing chunk "_timescaledb_internal._hyper_4_2_chunk" failed when compression policy is executed SET default_transaction_read_only TO off; --verify chunks are not compressed SELECT count(*) , count(*) FILTER ( WHERE is_compressed is true) @@ -344,6 +243,3 @@ SELECT alter_job(1,scheduled:=false); ERROR: cannot execute alter_job() in a read-only transaction SELECT delete_job(1); ERROR: cannot execute delete_job() in a read-only transaction -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); diff --git a/tsl/test/expected/remote_connection_cache.out b/tsl/test/expected/remote_connection_cache.out index ab58d9e3103..d0d399d3a2f 100644 --- a/tsl/test/expected/remote_connection_cache.out +++ b/tsl/test/expected/remote_connection_cache.out @@ -16,6 +16,7 @@ SET client_min_messages TO WARNING; SELECT node_name, database, node_created, extension_created FROM add_data_node('loopback_1', host => 'localhost', database => :'DN_DBNAME_1', port => current_setting('port')::int); +WARNING: adding data node is deprecated node_name | database | node_created | extension_created ------------+------------------------------+--------------+------------------- loopback_1 | db_remote_connection_cache_1 | t | t @@ -24,6 +25,7 @@ FROM add_data_node('loopback_1', host => 'localhost', database => :'DN_DBNAME_1' SELECT node_name, database, node_created, extension_created FROM add_data_node('loopback_2', host => 'localhost', database => :'DN_DBNAME_2', port => current_setting('port')::int); +WARNING: adding data node is deprecated node_name | database | node_created | extension_created ------------+------------------------------+--------------+------------------- loopback_2 | db_remote_connection_cache_2 | t | t @@ -43,6 +45,7 @@ GRANT CREATE ON SCHEMA public TO :ROLE_1; SET ROLE :ROLE_1; CREATE TABLE testtable (time timestamptz, location int, temp float); SELECT * FROM create_distributed_hypertable('testtable', 'time', 'location'); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "time" hypertable_id | schema_name | table_name | created ---------------+-------------+------------+--------- diff --git a/tsl/test/expected/remote_copy.out b/tsl/test/expected/remote_copy.out index f2553c39c30..f0d1105b58a 100644 --- a/tsl/test/expected/remote_copy.out +++ b/tsl/test/expected/remote_copy.out @@ -11,6 +11,9 @@ FROM ( SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) ) a; +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated +WARNING: adding data node is deprecated node_name | database | node_created | database_created | extension_created ------------------+------------------+--------------+------------------+------------------- db_remote_copy_1 | db_remote_copy_1 | t | t | t @@ -327,6 +330,7 @@ set timescaledb.enable_connection_binary_data = true; set timescaledb.dist_copy_transfer_format = 'text'; 
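The remote_connection_cache and remote_copy expected outputs above now record WARNING lines because the multi-node API is deprecated. As a minimal sketch of the behaviour a client session now sees (the node and table names below are hypothetical; only the warning texts are taken from the expected output above):

    -- hypothetical names; the WARNING lines mirror the deprecation messages above
    SELECT node_name, node_created
    FROM add_data_node('dn1', host => 'localhost', database => 'dn1_db');
    -- WARNING:  adding data node is deprecated
    CREATE TABLE metrics(time timestamptz NOT NULL, device int, value float);
    SELECT create_distributed_hypertable('metrics', 'time', 'device');
    -- WARNING:  distributed hypertable is deprecated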
create table escapes(t int, value text); select create_distributed_hypertable('escapes', 't', 'value', chunk_time_interval => 100); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "t" create_distributed_hypertable ------------------------------- @@ -368,6 +372,7 @@ ERROR: the number of columns doesn't match -- Test null values. create table null_values(t int, value text); select create_distributed_hypertable('null_values', 't', chunk_time_interval => 100); +WARNING: distributed hypertable is deprecated NOTICE: adding not-null constraint to column "t" create_distributed_hypertable ------------------------------- @@ -473,18 +478,21 @@ drop table null_values; drop table escapes; SET ROLE :ROLE_CLUSTER_SUPERUSER; SELECT * FROM delete_data_node(:'DATA_NODE_1'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t (1 row) SELECT * FROM delete_data_node(:'DATA_NODE_2'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t (1 row) SELECT * FROM delete_data_node(:'DATA_NODE_3'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t diff --git a/tsl/test/expected/remote_txn.out b/tsl/test/expected/remote_txn.out index 67bd4983671..e8985fed13c 100644 --- a/tsl/test/expected/remote_txn.out +++ b/tsl/test/expected/remote_txn.out @@ -53,12 +53,14 @@ CREATE OR REPLACE FUNCTION add_loopback_server( AS :TSL_MODULE_PATHNAME, 'ts_unchecked_add_data_node' LANGUAGE C; SELECT server_name, database, server_created, database_created, extension_created FROM add_loopback_server('loopback', database => :'TEST_DBNAME', bootstrap => false); +WARNING: adding data node is deprecated server_name | database | server_created | database_created | extension_created -------------+---------------+----------------+------------------+------------------- loopback | db_remote_txn | t | f | f (1 row) SELECT server_name, database, server_created, database_created, extension_created FROM add_loopback_server('loopback2', database => :'TEST_DBNAME', bootstrap => false); +WARNING: adding data node is deprecated server_name | database | server_created | database_created | extension_created -------------+---------------+----------------+------------------+------------------- loopback2 | db_remote_txn | t | f | f @@ -109,6 +111,7 @@ FROM _timescaledb_functions.show_connection_cache() ORDER BY 1,4; BEGIN; SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (20001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (20001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -141,6 +144,7 @@ SELECT count(*) FROM "S 1"."T 1" WHERE "C 1" = 20001; --aborted transaction BEGIN; SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (20002,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (20002,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -173,6 +177,7 @@ SELECT count(*) FROM "S 1"."T 1" WHERE "C 1" = 20002; --constraint violation BEGIN; SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (20001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (20001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') ERROR: [loopback]: 
duplicate key value violates unique constraint "t1_pkey" COMMIT; @@ -309,6 +314,7 @@ BEGIN; (1 row) SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (20005,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (20005,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -358,6 +364,7 @@ FROM _timescaledb_functions.show_connection_cache() ORDER BY 1,4; --block preparing transactions on the access node BEGIN; SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (20006,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (20006,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -382,6 +389,7 @@ FROM _timescaledb_functions.show_connection_cache() ORDER BY 1,4; --simple commit BEGIN; SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (10001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -420,6 +428,7 @@ SELECT count(*) FROM pg_prepared_xacts; --simple BEGIN; SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (11001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (11001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -458,6 +467,7 @@ SELECT count(*) FROM pg_prepared_xacts; --constraint violation should fail the txn BEGIN; SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (10001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') ERROR: [loopback]: duplicate key value violates unique constraint "t1_pkey" COMMIT; @@ -485,6 +495,7 @@ BEGIN; (1 row) SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (10002,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10002,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -535,6 +546,7 @@ BEGIN; (1 row) SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (10003,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10003,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -601,6 +613,7 @@ BEGIN; (1 row) SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (10004,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10004,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -695,6 +708,7 @@ BEGIN; (1 row) SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (10006,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10006,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -776,6 +790,7 @@ BEGIN; (1 row) 
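Every test.remote_exec() call in remote_txn.out now adds a "WARNING: executing remote command is deprecated" line to the expected output. Where such warning noise is not what a test is checking, it could in principle be silenced instead of recorded; a hedged sketch (the test.remote_exec helper and the loopback node come from the surrounding test, client_min_messages is standard PostgreSQL):

    -- sketch only: raising client_min_messages hides the deprecation WARNING
    -- (and the NOTICE echo of the remote command) instead of recording them
    SET client_min_messages TO ERROR;
    SELECT test.remote_exec('{loopback}', $$ SELECT 1 $$);
    RESET client_min_messages;

remote_txn.out itself keeps recording the warnings, which is why each remote_exec call in the hunks above and below gains one WARNING line.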
SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (10005,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10005,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -832,6 +847,7 @@ select count(*) from _timescaledb_catalog.remote_txn; --should be fine if we don't see any WARNINGS. BEGIN; SELECT test.remote_exec('{loopback}', $$ PREPARE prep_1 AS SELECT 1 $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: PREPARE prep_1 AS SELECT 1 remote_exec ------------- @@ -839,6 +855,7 @@ NOTICE: [loopback]: PREPARE prep_1 AS SELECT 1 (1 row) SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (10001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') ERROR: [loopback]: duplicate key value violates unique constraint "t1_pkey" COMMIT; @@ -853,6 +870,7 @@ FROM _timescaledb_functions.show_connection_cache() ORDER BY 1,4; BEGIN; SAVEPOINT save_1; SELECT test.remote_exec('{loopback}', $$ PREPARE prep_1 AS SELECT 1 $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: PREPARE prep_1 AS SELECT 1 remote_exec ------------- @@ -869,6 +887,7 @@ NOTICE: [loopback]: PREPARE prep_1 AS SELECT 1 --generate a unique violation SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (10001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') ERROR: [loopback]: duplicate key value violates unique constraint "t1_pkey" ROLLBACK TO SAVEPOINT save_1; @@ -882,6 +901,7 @@ ERROR: [loopback]: duplicate key value violates unique constraint "t1_pkey" (1 row) SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (81,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (81,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') ERROR: [loopback]: duplicate key value violates unique constraint "t1_pkey" COMMIT; @@ -1078,6 +1098,7 @@ FROM _timescaledb_functions.show_connection_cache() ORDER BY 1,4; --test simple subtrans abort. 
BEGIN; SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (10012,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10012,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -1085,6 +1106,7 @@ NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10012,1,'bleh', '2001-01-0 (1 row) SELECT test.remote_exec('{loopback2}', $$ INSERT INTO "S 1"."T 1" VALUES (10013,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback2]: INSERT INTO "S 1"."T 1" VALUES (10013,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -1101,6 +1123,7 @@ NOTICE: [loopback2]: INSERT INTO "S 1"."T 1" VALUES (10013,1,'bleh', '2001-01- SAVEPOINT save_1; SELECT test.remote_exec('{loopback2}', $$ INSERT INTO "S 1"."T 1" VALUES (10001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback2]: INSERT INTO "S 1"."T 1" VALUES (10001,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -1127,6 +1150,7 @@ NOTICE: [loopback2]: INSERT INTO "S 1"."T 1" VALUES (10001,1,'bleh', '2001-01- (2 rows) SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (10014,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10014,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -1134,6 +1158,7 @@ NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10014,1,'bleh', '2001-01-0 (1 row) SELECT test.remote_exec('{loopback2}', $$ INSERT INTO "S 1"."T 1" VALUES (10015,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback2]: INSERT INTO "S 1"."T 1" VALUES (10015,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -1246,6 +1271,7 @@ SELECT count(*) FROM pg_prepared_xacts; --block preparing transactions on the frontend BEGIN; SELECT test.remote_exec('{loopback}', $$ INSERT INTO "S 1"."T 1" VALUES (10051,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') $$); +WARNING: executing remote command is deprecated NOTICE: [loopback]: INSERT INTO "S 1"."T 1" VALUES (10051,1,'bleh', '2001-01-01', '2001-01-01', 'bleh') remote_exec ------------- @@ -1263,6 +1289,7 @@ SELECT count(*) FROM _timescaledb_catalog.remote_txn WHERE data_node_name = 'loo (1 row) SELECT * FROM delete_data_node('loopback'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t @@ -1275,6 +1302,7 @@ SELECT count(*) FROM _timescaledb_catalog.remote_txn WHERE data_node_name = 'loo (1 row) SELECT * FROM delete_data_node('loopback2'); +WARNING: deleting data node is deprecated delete_data_node ------------------ t diff --git a/tsl/test/expected/remote_txn_resolve.out b/tsl/test/expected/remote_txn_resolve.out index 4cfb9654435..d1e46bda8bd 100644 --- a/tsl/test/expected/remote_txn_resolve.out +++ b/tsl/test/expected/remote_txn_resolve.out @@ -27,18 +27,21 @@ CREATE OR REPLACE FUNCTION add_loopback_server( AS :TSL_MODULE_PATHNAME, 'ts_unchecked_add_data_node' LANGUAGE C; SELECT database FROM add_loopback_server('loopback', database => :'TEST_DBNAME', bootstrap => false); +WARNING: adding data node is deprecated database ----------------------- db_remote_txn_resolve (1 row) SELECT database FROM add_loopback_server('loopback2', database => :'TEST_DBNAME', 
bootstrap => false); +WARNING: adding data node is deprecated database ----------------------- db_remote_txn_resolve (1 row) SELECT database FROM add_loopback_server('loopback3', database => :'TEST_DBNAME', bootstrap => false); +WARNING: adding data node is deprecated database ----------------------- db_remote_txn_resolve diff --git a/tsl/test/expected/telemetry_stats-14.out b/tsl/test/expected/telemetry_stats-14.out deleted file mode 100644 index 095d92d7cd7..00000000000 --- a/tsl/test/expected/telemetry_stats-14.out +++ /dev/null @@ -1,1197 +0,0 @@ --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. ---telemetry tests that require a community license -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; --- function call info size is too variable for this test, so disable it -SET timescaledb.telemetry_level='no_functions'; -SELECT setseed(1); - setseed ---------- - -(1 row) - --- Create a materialized view from the telemetry report so that we --- don't regenerate telemetry for every query. Filter heap_size for --- materialized views since PG14 reports a different heap size for --- them compared to earlier PG versions. -CREATE MATERIALIZED VIEW telemetry_report AS -SELECT (r #- '{relations,materialized_views,heap_size}') AS r -FROM get_telemetry_report() r; -CREATE VIEW relations AS -SELECT r -> 'relations' AS rels -FROM telemetry_report; -SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, - rels -> 'hypertables' -> 'num_relations' AS num_hypertables -FROM relations; - num_continuous_aggs | num_hypertables ----------------------+----------------- - 0 | 0 -(1 row) - --- check telemetry picks up flagged content from metadata -SELECT r -> 'db_metadata' AS db_metadata -FROM telemetry_report; - db_metadata -------------- - {} -(1 row) - --- check timescaledb_telemetry.cloud -SELECT r -> 'instance_metadata' AS instance_metadata -FROM telemetry_report r; - instance_metadata -------------------- - {"cloud": "ci"} -(1 row) - -CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); -CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); -CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); -CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); -CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); -CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); -CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); -CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); -CREATE TABLE hyper (LIKE normal); -SELECT table_name FROM create_hypertable('hyper', 'time'); - table_name ------------- - hyper -(1 row) - -CREATE MATERIALIZED VIEW contagg -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - hyper -GROUP BY hour, device; -NOTICE: continuous aggregate "contagg" is already up-to-date -CREATE MATERIALIZED VIEW contagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - hyper -GROUP BY hour, device; -NOTICE: continuous aggregate 
"contagg_old" is already up-to-date --- Create another view (already have the "relations" view) -CREATE VIEW devices AS -SELECT DISTINCT ON (device) device -FROM hyper; --- Show relations with no data -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 0, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 0 + - }, + - "hypertables": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 8192, + - "num_children": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 0, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "continuous_aggregates": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "num_compressed_caggs": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- Insert data -INSERT INTO normal -SELECT t, ceil(random() * 10)::int, random() * 30 -FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; -INSERT INTO hyper -SELECT * FROM normal; -INSERT INTO part -SELECT * FROM normal; -CALL refresh_continuous_aggregate('contagg', NULL, NULL); -CALL 
refresh_continuous_aggregate('contagg_old', NULL, NULL); --- ANALYZE to get updated reltuples stats -ANALYZE normal, hyper, part; -SELECT count(c) FROM show_chunks('hyper') c; - count -------- - 9 -(1 row) - -SELECT count(c) FROM show_chunks('contagg') c; - count -------- - 2 -(1 row) - -SELECT count(c) FROM show_chunks('contagg_old') c; - count -------- - 2 -(1 row) - --- Update and show the telemetry report -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 65536, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 697 + - }, + - "hypertables": { + - "heap_size": 73728, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 155648, + - "num_children": 9, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 98304, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "continuous_aggregates": { + - "heap_size": 188416, + - "toast_size": 16384, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "num_compressed_caggs": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0 + - }, + - "indexes_size": 229376, + - "num_children": 4, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- Actual row count should be the same as reltuples stats for all 
tables -SELECT (SELECT count(*) FROM normal) num_inserted_rows, - (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, - (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, - (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; - num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples --------------------+------------------+-----------------+---------------- - 697 | 697 | 697 | 697 -(1 row) - --- Add compression -ALTER TABLE hyper SET (timescaledb.compress); -SELECT compress_chunk(c) -FROM show_chunks('hyper') c ORDER BY c LIMIT 4; - compress_chunk ----------------------------------------- - _timescaledb_internal._hyper_1_1_chunk - _timescaledb_internal._hyper_1_2_chunk - _timescaledb_internal._hyper_1_3_chunk - _timescaledb_internal._hyper_1_4_chunk -(4 rows) - -ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); -NOTICE: defaulting compress_segmentby to device -NOTICE: defaulting compress_orderby to hour -SELECT compress_chunk(c) -FROM show_chunks('contagg') c ORDER BY c LIMIT 1; - compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_2_10_chunk -(1 row) - --- Turn of real-time aggregation -ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); -ANALYZE normal, hyper, part; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 65536, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 697 + - }, + - "hypertables": { + - "heap_size": 73728, + - "toast_size": 32768, + - "compression": { + - "compressed_heap_size": 32768, + - "compressed_row_count": 4, + - "compressed_toast_size": 32768, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 32768, + - "uncompressed_row_count": 284, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 65536, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 122880, + - "num_children": 9, + - "num_relations": 1, + - "num_reltuples": 413 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 98304, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "continuous_aggregates": { + - "heap_size": 180224, + - "toast_size": 24576, + - "compression": { + - "compressed_heap_size": 40960, + - "compressed_row_count": 10, + - "num_compressed_caggs": 1, + - "compressed_toast_size": 8192, + - "num_compressed_chunks": 1, + - "uncompressed_heap_size": 49152, + - "uncompressed_row_count": 452, + - "compressed_indexes_size": 16384, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 81920 + - }, + - "indexes_size": 180224, + - "num_children": 4, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 1 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - 
"uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- Add distributed hypertables -\set DN_DBNAME_1 :TEST_DBNAME _1 -\set DN_DBNAME_2 :TEST_DBNAME _2 --- Not an access node or data node -SELECT r -> 'num_data_nodes' AS num_data_nodes, - r -> 'distributed_member' AS distributed_member -FROM telemetry_report; - num_data_nodes | distributed_member -----------------+-------------------- - | "none" -(1 row) - --- Become an access node by adding a data node -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); - node_name | database | node_created | database_created | extension_created --------------+----------------------+--------------+------------------+------------------- - data_node_1 | db_telemetry_stats_1 | t | t | t -(1 row) - --- Telemetry should show one data node and "acces node" status -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT r -> 'num_data_nodes' AS num_data_nodes, - r -> 'distributed_member' AS distributed_member -FROM telemetry_report; - num_data_nodes | distributed_member -----------------+-------------------- - 1 | "access node" -(1 row) - --- See telemetry report from a data node -\ir include/remote_exec.sql --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. 
-CREATE SCHEMA IF NOT EXISTS test; -psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping -GRANT USAGE ON SCHEMA test TO PUBLIC; -CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) -RETURNS VOID -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' -LANGUAGE C; -CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) -RETURNS TABLE("table_record" CSTRING[]) -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' -LANGUAGE C; -SELECT test.remote_exec(NULL, $$ - SELECT t -> 'num_data_nodes' AS num_data_nodes, - t -> 'distributed_member' AS distributed_member - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT t -> 'num_data_nodes' AS num_data_nodes, - t -> 'distributed_member' AS distributed_member - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -num_data_nodes|distributed_member ---------------+------------------ - |"data node" -(1 row) - - - remote_exec -------------- - -(1 row) - -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); - node_name | database | node_created | database_created | extension_created --------------+----------------------+--------------+------------------+------------------- - data_node_2 | db_telemetry_stats_2 | t | t | t -(1 row) - -CREATE TABLE disthyper (LIKE normal); -SELECT create_distributed_hypertable('disthyper', 'time', 'device'); - create_distributed_hypertable -------------------------------- - (6,public,disthyper,t) -(1 row) - --- Show distributed hypertables stats with no data -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 1, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- No datanode-related stats on the access node -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn -FROM relations; - distributed_hypertables_dn ------------------------------------------ - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0,+ - "num_compressed_hypertables": 0+ - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - } -(1 row) - --- Insert data into the distributed hypertable -INSERT INTO disthyper -SELECT * FROM normal; --- Update telemetry stats and show output on access node and data --- nodes. Note that the access node doesn't store data so shows --- zero. It should have stats from ANALYZE, though, like --- num_reltuples. 
-ANALYZE disthyper; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 18, + - "num_relations": 1, + - "num_reltuples": 697, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- Show data node stats -SELECT test.remote_exec(NULL, $$ - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -distributed_hypertables_dn --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -{ - "heap_size": 73728, - "toast_size": 0, - "compression": { - "compressed_heap_size": 0, - "compressed_row_count": 0, - "compressed_toast_size": 0, - "num_compressed_chunks": 0, - "uncompressed_heap_size": 0, - "uncompressed_row_count": 0, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 0, - "num_compressed_hypertables": 0 - }, - "indexes_size": 311296, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 357 -} -(1 row) - - -NOTICE: [data_node_2]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_2]: -distributed_hypertables_dn --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -{ - "heap_size": 73728, - "toast_size": 0, - "compression": { - "compressed_heap_size": 0, - "compressed_row_count": 0, - "compressed_toast_size": 0, - "num_compressed_chunks": 0, - "uncompressed_heap_size": 0, - "uncompressed_row_count": 0, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 0, - "num_compressed_hypertables": 0 - }, - "indexes_size": 311296, - "num_children": 9, - "num_relations": 1, - 
"num_reltuples": 340 -} -(1 row) - - - remote_exec -------------- - -(1 row) - --- Add compression -ALTER TABLE disthyper SET (timescaledb.compress); -SELECT compress_chunk(c) -FROM show_chunks('disthyper') c ORDER BY c LIMIT 4; - compress_chunk ----------------------------------------------- - _timescaledb_internal._dist_hyper_6_19_chunk - _timescaledb_internal._dist_hyper_6_20_chunk - _timescaledb_internal._dist_hyper_6_21_chunk - _timescaledb_internal._dist_hyper_6_22_chunk -(4 rows) - -ANALYZE disthyper; --- Update telemetry stats and show updated compression stats -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 0, + - "num_children": 18, + - "num_relations": 1, + - "num_reltuples": 581, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- Show data node stats -SELECT test.remote_exec(NULL, $$ - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -distributed_hypertables_dn ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -{ - "heap_size": 73728, - "toast_size": 16384, - "compression": { - "compressed_heap_size": 16384, - "compressed_row_count": 2, - "compressed_toast_size": 16384, - "num_compressed_chunks": 2, - "uncompressed_heap_size": 16384, - "uncompressed_row_count": 72, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 65536, - "num_compressed_hypertables": 1 - }, - "indexes_size": 278528, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 285 -} -(1 row) - - -NOTICE: [data_node_2]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_2]: -distributed_hypertables_dn 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -{ - "heap_size": 73728, - "toast_size": 16384, - "compression": { - "compressed_heap_size": 16384, - "compressed_row_count": 2, - "compressed_toast_size": 16384, - "num_compressed_chunks": 2, - "uncompressed_heap_size": 16384, - "uncompressed_row_count": 44, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 65536, - "num_compressed_hypertables": 1 - }, - "indexes_size": 278528, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 296 -} -(1 row) - - - remote_exec -------------- - -(1 row) - --- Create a replicated distributed hypertable and show replication stats -CREATE TABLE disthyper_repl (LIKE normal); -SELECT create_distributed_hypertable('disthyper_repl', 'time', 'device', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (7,public,disthyper_repl,t) -(1 row) - -INSERT INTO disthyper_repl -SELECT * FROM normal; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 0, + - "num_children": 36, + - "num_relations": 2, + - "num_reltuples": 581, + - "num_replica_chunks": 18, + - "num_replicated_distributed_hypertables": 1+ - } -(1 row) - --- Create a continuous aggregate on the distributed hypertable -CREATE MATERIALIZED VIEW distcontagg -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - disthyper -GROUP BY hour, device; -NOTICE: refreshing continuous aggregate "distcontagg" -CREATE MATERIALIZED VIEW distcontagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - disthyper -GROUP BY hour, device; -NOTICE: refreshing continuous aggregate "distcontagg_old" -VACUUM; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'continuous_aggregates') AS continuous_aggregates -FROM relations; - continuous_aggregates ------------------------------------------------- - { + - "heap_size": 425984, + - "toast_size": 40960, + - "compression": { + - "compressed_heap_size": 40960, + - "compressed_row_count": 10, + - "num_compressed_caggs": 1, + - "compressed_toast_size": 8192, + - "num_compressed_chunks": 1, + - "uncompressed_heap_size": 49152, + - "uncompressed_row_count": 452, + - "compressed_indexes_size": 
16384, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 81920 + - }, + - "indexes_size": 409600, + - "num_children": 8, + - "num_relations": 4, + - "num_reltuples": 2336, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 2, + - "num_caggs_on_distributed_hypertables": 2,+ - "num_caggs_using_real_time_aggregation": 3+ - } -(1 row) - --- check telemetry for fixed schedule jobs works -create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ -begin -raise log 'this is job_test_fixed'; -end -$$; -create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ -begin -raise log 'this is job_test_drifting'; -end -$$; --- before adding the jobs -select get_telemetry_report()->'num_user_defined_actions_fixed'; - ?column? ----------- - 0 -(1 row) - -select get_telemetry_report()->'num_user_defined_actions'; - ?column? ----------- - 0 -(1 row) - -select add_job('job_test_fixed', '1 week'); - add_job ---------- - 1000 -(1 row) - -select add_job('job_test_drifting', '1 week', fixed_schedule => false); - add_job ---------- - 1001 -(1 row) - --- add continuous aggregate refresh policy for contagg -select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting - add_continuous_aggregate_policy ---------------------------------- - 1002 -(1 row) - -select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed - add_continuous_aggregate_policy ---------------------------------- - 1003 -(1 row) - --- add retention policy, fixed -select add_retention_policy('hyper', interval '1 year', initial_start => now()); - add_retention_policy ----------------------- - 1004 -(1 row) - --- add compression policy -select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); - add_compression_policy ------------------------- - 1005 -(1 row) - -select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; - uda_fixed | uda_drifting ------------+-------------- - 1 | 1 -(1 row) - -select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; - contagg_fixed | contagg_drifting ----------------+------------------ - 1 | 1 -(1 row) - -select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; - compress_fixed | retention_fixed -----------------+----------------- - 1 | 1 -(1 row) - -DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; -TRUNCATE _timescaledb_internal.job_errors; --- create some "errors" for testing -INSERT INTO -_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name) -VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), -(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), -(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'), -(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'), -(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', 
interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'), --- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions -(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'); --- create some errors for them -INSERT INTO -_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) -values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), -(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), -(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), -(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'), -(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'), -(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'), -(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'); --- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs -SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); - jsonb_pretty ----------------------------------------------- - { + - "policy_retention": { + - "P0001": 1 + - }, + - "policy_compression": { + - "JF009": 1 + - }, + - "user_defined_action": { + - "ABCDE": 1, + - "P0001": 2 + - }, + - "policy_refresh_continuous_aggregate": {+ - "P0001": 2 + - } + - } -(1 row) - --- for job statistics, insert some records into bgw_job_stats -INSERT INTO _timescaledb_internal.bgw_job_stat -values -(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2005, '2040-01-01 
00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); -SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); - jsonb_pretty ------------------------------------------------- - { + - "policy_retention": { + - "total_runs": 1, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 1, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 2 secs",+ - "max_consecutive_failures": 1 + - }, + - "policy_compression": { + - "total_runs": 1, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 1, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 2 secs",+ - "max_consecutive_failures": 1 + - }, + - "user_defined_action": { + - "total_runs": 2, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 2, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 4 secs",+ - "max_consecutive_failures": 1 + - }, + - "policy_refresh_continuous_aggregate": { + - "total_runs": 2, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 2, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 4 secs",+ - "max_consecutive_failures": 1 + - } + - } -(1 row) - --- create nested continuous aggregates - copied from cagg_on_cagg_common -CREATE TABLE conditions ( - time timestamptz NOT NULL, - temperature int -); -SELECT create_hypertable('conditions', 'time'); - create_hypertable --------------------------- - (10,public,conditions,t) -(1 row) - -CREATE MATERIALIZED VIEW conditions_summary_hourly_1 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - time_bucket('1 hour', "time") AS bucket, - SUM(temperature) AS temperature -FROM conditions -GROUP BY 1 -WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_daily_2 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - time_bucket('1 day', "bucket") AS bucket, - SUM(temperature) AS temperature -FROM conditions_summary_hourly_1 -GROUP BY 1 -WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_weekly_3 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - time_bucket('1 week', "bucket") AS bucket, - SUM(temperature) AS temperature -FROM conditions_summary_daily_2 -GROUP BY 1 -WITH NO DATA; -SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); - jsonb_pretty --------------- - 2 -(1 row) - -DROP VIEW relations; -DROP MATERIALIZED VIEW telemetry_report; -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -DROP DATABASE :DN_DBNAME_1 WITH (FORCE); -DROP DATABASE :DN_DBNAME_2 WITH (FORCE); diff --git a/tsl/test/expected/telemetry_stats-15.out b/tsl/test/expected/telemetry_stats-15.out deleted file mode 100644 index cf2aa3db3df..00000000000 --- a/tsl/test/expected/telemetry_stats-15.out +++ /dev/null @@ -1,1197 +0,0 @@ --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. 
---telemetry tests that require a community license -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; --- function call info size is too variable for this test, so disable it -SET timescaledb.telemetry_level='no_functions'; -SELECT setseed(1); - setseed ---------- - -(1 row) - --- Create a materialized view from the telemetry report so that we --- don't regenerate telemetry for every query. Filter heap_size for --- materialized views since PG14 reports a different heap size for --- them compared to earlier PG versions. -CREATE MATERIALIZED VIEW telemetry_report AS -SELECT (r #- '{relations,materialized_views,heap_size}') AS r -FROM get_telemetry_report() r; -CREATE VIEW relations AS -SELECT r -> 'relations' AS rels -FROM telemetry_report; -SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, - rels -> 'hypertables' -> 'num_relations' AS num_hypertables -FROM relations; - num_continuous_aggs | num_hypertables ----------------------+----------------- - 0 | 0 -(1 row) - --- check telemetry picks up flagged content from metadata -SELECT r -> 'db_metadata' AS db_metadata -FROM telemetry_report; - db_metadata -------------- - {} -(1 row) - --- check timescaledb_telemetry.cloud -SELECT r -> 'instance_metadata' AS instance_metadata -FROM telemetry_report r; - instance_metadata -------------------- - {"cloud": "ci"} -(1 row) - -CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); -CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); -CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); -CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); -CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); -CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); -CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); -CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); -CREATE TABLE hyper (LIKE normal); -SELECT table_name FROM create_hypertable('hyper', 'time'); - table_name ------------- - hyper -(1 row) - -CREATE MATERIALIZED VIEW contagg -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - hyper -GROUP BY hour, device; -NOTICE: continuous aggregate "contagg" is already up-to-date -CREATE MATERIALIZED VIEW contagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - hyper -GROUP BY hour, device; -NOTICE: continuous aggregate "contagg_old" is already up-to-date --- Create another view (already have the "relations" view) -CREATE VIEW devices AS -SELECT DISTINCT ON (device) device -FROM hyper; --- Show relations with no data -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 0, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 0 + - }, + - "hypertables": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - 
"num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 8192, + - "num_children": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 0, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "continuous_aggregates": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "num_compressed_caggs": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- Insert data -INSERT INTO normal -SELECT t, ceil(random() * 10)::int, random() * 30 -FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; -INSERT INTO hyper -SELECT * FROM normal; -INSERT INTO part -SELECT * FROM normal; -CALL refresh_continuous_aggregate('contagg', NULL, NULL); -CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); --- ANALYZE to get updated reltuples stats -ANALYZE normal, hyper, part; -SELECT count(c) FROM show_chunks('hyper') c; - count -------- - 9 -(1 row) - -SELECT count(c) FROM show_chunks('contagg') c; - count -------- - 2 -(1 row) - -SELECT count(c) FROM show_chunks('contagg_old') c; - count -------- - 2 -(1 row) - --- Update and show the telemetry report -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 65536, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - 
"num_reltuples": 697 + - }, + - "hypertables": { + - "heap_size": 73728, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 155648, + - "num_children": 9, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 98304, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "continuous_aggregates": { + - "heap_size": 188416, + - "toast_size": 16384, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "num_compressed_caggs": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0 + - }, + - "indexes_size": 229376, + - "num_children": 4, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- Actual row count should be the same as reltuples stats for all tables -SELECT (SELECT count(*) FROM normal) num_inserted_rows, - (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, - (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, - (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; - num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples --------------------+------------------+-----------------+---------------- - 697 | 697 | 697 | 697 -(1 row) - --- Add compression -ALTER TABLE hyper SET (timescaledb.compress); -SELECT compress_chunk(c) -FROM show_chunks('hyper') c ORDER BY c LIMIT 4; - compress_chunk ----------------------------------------- - _timescaledb_internal._hyper_1_1_chunk - 
_timescaledb_internal._hyper_1_2_chunk - _timescaledb_internal._hyper_1_3_chunk - _timescaledb_internal._hyper_1_4_chunk -(4 rows) - -ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); -NOTICE: defaulting compress_segmentby to device -NOTICE: defaulting compress_orderby to hour -SELECT compress_chunk(c) -FROM show_chunks('contagg') c ORDER BY c LIMIT 1; - compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_2_10_chunk -(1 row) - --- Turn of real-time aggregation -ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); -ANALYZE normal, hyper, part; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 65536, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 697 + - }, + - "hypertables": { + - "heap_size": 73728, + - "toast_size": 32768, + - "compression": { + - "compressed_heap_size": 32768, + - "compressed_row_count": 4, + - "compressed_toast_size": 32768, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 32768, + - "uncompressed_row_count": 284, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 65536, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 122880, + - "num_children": 9, + - "num_relations": 1, + - "num_reltuples": 413 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 98304, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "continuous_aggregates": { + - "heap_size": 180224, + - "toast_size": 24576, + - "compression": { + - "compressed_heap_size": 40960, + - "compressed_row_count": 10, + - "num_compressed_caggs": 1, + - "compressed_toast_size": 8192, + - "num_compressed_chunks": 1, + - "uncompressed_heap_size": 49152, + - "uncompressed_row_count": 452, + - "compressed_indexes_size": 16384, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 81920 + - }, + - "indexes_size": 180224, + - "num_children": 4, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 1 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - 
"indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- Add distributed hypertables -\set DN_DBNAME_1 :TEST_DBNAME _1 -\set DN_DBNAME_2 :TEST_DBNAME _2 --- Not an access node or data node -SELECT r -> 'num_data_nodes' AS num_data_nodes, - r -> 'distributed_member' AS distributed_member -FROM telemetry_report; - num_data_nodes | distributed_member -----------------+-------------------- - | "none" -(1 row) - --- Become an access node by adding a data node -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); - node_name | database | node_created | database_created | extension_created --------------+----------------------+--------------+------------------+------------------- - data_node_1 | db_telemetry_stats_1 | t | t | t -(1 row) - --- Telemetry should show one data node and "acces node" status -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT r -> 'num_data_nodes' AS num_data_nodes, - r -> 'distributed_member' AS distributed_member -FROM telemetry_report; - num_data_nodes | distributed_member -----------------+-------------------- - 1 | "access node" -(1 row) - --- See telemetry report from a data node -\ir include/remote_exec.sql --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. -CREATE SCHEMA IF NOT EXISTS test; -psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping -GRANT USAGE ON SCHEMA test TO PUBLIC; -CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) -RETURNS VOID -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' -LANGUAGE C; -CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) -RETURNS TABLE("table_record" CSTRING[]) -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' -LANGUAGE C; -SELECT test.remote_exec(NULL, $$ - SELECT t -> 'num_data_nodes' AS num_data_nodes, - t -> 'distributed_member' AS distributed_member - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT t -> 'num_data_nodes' AS num_data_nodes, - t -> 'distributed_member' AS distributed_member - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -num_data_nodes|distributed_member ---------------+------------------ - |"data node" -(1 row) - - - remote_exec -------------- - -(1 row) - -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); - node_name | database | node_created | database_created | extension_created --------------+----------------------+--------------+------------------+------------------- - data_node_2 | db_telemetry_stats_2 | t | t | t -(1 row) - -CREATE TABLE disthyper (LIKE normal); -SELECT create_distributed_hypertable('disthyper', 'time', 'device'); - create_distributed_hypertable -------------------------------- - (6,public,disthyper,t) -(1 row) - --- Show distributed hypertables stats with no data -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + 
- "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 1, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- No datanode-related stats on the access node -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn -FROM relations; - distributed_hypertables_dn ------------------------------------------ - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0,+ - "num_compressed_hypertables": 0+ - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - } -(1 row) - --- Insert data into the distributed hypertable -INSERT INTO disthyper -SELECT * FROM normal; --- Update telemetry stats and show output on access node and data --- nodes. Note that the access node doesn't store data so shows --- zero. It should have stats from ANALYZE, though, like --- num_reltuples. -ANALYZE disthyper; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 18, + - "num_relations": 1, + - "num_reltuples": 697, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- Show data node stats -SELECT test.remote_exec(NULL, $$ - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -distributed_hypertables_dn --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -{ - "heap_size": 73728, - "toast_size": 0, - "compression": { - "compressed_heap_size": 0, - "compressed_row_count": 0, - 
"compressed_toast_size": 0, - "num_compressed_chunks": 0, - "uncompressed_heap_size": 0, - "uncompressed_row_count": 0, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 0, - "num_compressed_hypertables": 0 - }, - "indexes_size": 311296, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 368 -} -(1 row) - - -NOTICE: [data_node_2]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_2]: -distributed_hypertables_dn --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -{ - "heap_size": 73728, - "toast_size": 0, - "compression": { - "compressed_heap_size": 0, - "compressed_row_count": 0, - "compressed_toast_size": 0, - "num_compressed_chunks": 0, - "uncompressed_heap_size": 0, - "uncompressed_row_count": 0, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 0, - "num_compressed_hypertables": 0 - }, - "indexes_size": 311296, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 329 -} -(1 row) - - - remote_exec -------------- - -(1 row) - --- Add compression -ALTER TABLE disthyper SET (timescaledb.compress); -SELECT compress_chunk(c) -FROM show_chunks('disthyper') c ORDER BY c LIMIT 4; - compress_chunk ----------------------------------------------- - _timescaledb_internal._dist_hyper_6_19_chunk - _timescaledb_internal._dist_hyper_6_20_chunk - _timescaledb_internal._dist_hyper_6_21_chunk - _timescaledb_internal._dist_hyper_6_22_chunk -(4 rows) - -ANALYZE disthyper; --- Update telemetry stats and show updated compression stats -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 0, + - "num_children": 18, + - "num_relations": 1, + - "num_reltuples": 581, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- Show data node stats -SELECT test.remote_exec(NULL, $$ - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -distributed_hypertables_dn 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -{ - "heap_size": 73728, - "toast_size": 16384, - "compression": { - "compressed_heap_size": 16384, - "compressed_row_count": 2, - "compressed_toast_size": 16384, - "num_compressed_chunks": 2, - "uncompressed_heap_size": 16384, - "uncompressed_row_count": 56, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 65536, - "num_compressed_hypertables": 1 - }, - "indexes_size": 278528, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 312 -} -(1 row) - - -NOTICE: [data_node_2]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_2]: -distributed_hypertables_dn ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -{ - "heap_size": 73728, - "toast_size": 16384, - "compression": { - "compressed_heap_size": 16384, - "compressed_row_count": 2, - "compressed_toast_size": 16384, - "num_compressed_chunks": 2, - "uncompressed_heap_size": 16384, - "uncompressed_row_count": 60, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 65536, - "num_compressed_hypertables": 1 - }, - "indexes_size": 278528, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 269 -} -(1 row) - - - remote_exec -------------- - -(1 row) - --- Create a replicated distributed hypertable and show replication stats -CREATE TABLE disthyper_repl (LIKE normal); -SELECT create_distributed_hypertable('disthyper_repl', 'time', 'device', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (7,public,disthyper_repl,t) -(1 row) - -INSERT INTO disthyper_repl -SELECT * FROM normal; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 0, + - "num_children": 36, + - "num_relations": 2, + - "num_reltuples": 581, + - 
"num_replica_chunks": 18, + - "num_replicated_distributed_hypertables": 1+ - } -(1 row) - --- Create a continuous aggregate on the distributed hypertable -CREATE MATERIALIZED VIEW distcontagg -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - disthyper -GROUP BY hour, device; -NOTICE: refreshing continuous aggregate "distcontagg" -CREATE MATERIALIZED VIEW distcontagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - disthyper -GROUP BY hour, device; -NOTICE: refreshing continuous aggregate "distcontagg_old" -VACUUM; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'continuous_aggregates') AS continuous_aggregates -FROM relations; - continuous_aggregates ------------------------------------------------- - { + - "heap_size": 425984, + - "toast_size": 40960, + - "compression": { + - "compressed_heap_size": 40960, + - "compressed_row_count": 10, + - "num_compressed_caggs": 1, + - "compressed_toast_size": 8192, + - "num_compressed_chunks": 1, + - "uncompressed_heap_size": 49152, + - "uncompressed_row_count": 452, + - "compressed_indexes_size": 16384, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 81920 + - }, + - "indexes_size": 409600, + - "num_children": 8, + - "num_relations": 4, + - "num_reltuples": 2336, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 2, + - "num_caggs_on_distributed_hypertables": 2,+ - "num_caggs_using_real_time_aggregation": 3+ - } -(1 row) - --- check telemetry for fixed schedule jobs works -create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ -begin -raise log 'this is job_test_fixed'; -end -$$; -create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ -begin -raise log 'this is job_test_drifting'; -end -$$; --- before adding the jobs -select get_telemetry_report()->'num_user_defined_actions_fixed'; - ?column? ----------- - 0 -(1 row) - -select get_telemetry_report()->'num_user_defined_actions'; - ?column? 
----------- - 0 -(1 row) - -select add_job('job_test_fixed', '1 week'); - add_job ---------- - 1000 -(1 row) - -select add_job('job_test_drifting', '1 week', fixed_schedule => false); - add_job ---------- - 1001 -(1 row) - --- add continuous aggregate refresh policy for contagg -select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting - add_continuous_aggregate_policy ---------------------------------- - 1002 -(1 row) - -select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed - add_continuous_aggregate_policy ---------------------------------- - 1003 -(1 row) - --- add retention policy, fixed -select add_retention_policy('hyper', interval '1 year', initial_start => now()); - add_retention_policy ----------------------- - 1004 -(1 row) - --- add compression policy -select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); - add_compression_policy ------------------------- - 1005 -(1 row) - -select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; - uda_fixed | uda_drifting ------------+-------------- - 1 | 1 -(1 row) - -select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; - contagg_fixed | contagg_drifting ----------------+------------------ - 1 | 1 -(1 row) - -select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; - compress_fixed | retention_fixed -----------------+----------------- - 1 | 1 -(1 row) - -DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; -TRUNCATE _timescaledb_internal.job_errors; --- create some "errors" for testing -INSERT INTO -_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name) -VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), -(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), -(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'), -(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'), -(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'), --- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions -(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'); --- create some errors for them -INSERT INTO -_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) -values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), -(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), -(2001, 54321, 
'2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), -(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'), -(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'), -(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'), -(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'); --- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs -SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); - jsonb_pretty ----------------------------------------------- - { + - "policy_retention": { + - "P0001": 1 + - }, + - "policy_compression": { + - "JF009": 1 + - }, + - "user_defined_action": { + - "ABCDE": 1, + - "P0001": 2 + - }, + - "policy_refresh_continuous_aggregate": {+ - "P0001": 2 + - } + - } -(1 row) - --- for job statistics, insert some records into bgw_job_stats -INSERT INTO _timescaledb_internal.bgw_job_stat -values -(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); -SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); - jsonb_pretty ------------------------------------------------- - { + - "policy_retention": { + - "total_runs": 1, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 1, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 2 secs",+ - "max_consecutive_failures": 1 + - }, + - "policy_compression": { + - "total_runs": 1, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 1, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 2 secs",+ - "max_consecutive_failures": 1 + - }, + - "user_defined_action": { + - "total_runs": 2, + 
- "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 2, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 4 secs",+ - "max_consecutive_failures": 1 + - }, + - "policy_refresh_continuous_aggregate": { + - "total_runs": 2, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 2, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 4 secs",+ - "max_consecutive_failures": 1 + - } + - } -(1 row) - --- create nested continuous aggregates - copied from cagg_on_cagg_common -CREATE TABLE conditions ( - time timestamptz NOT NULL, - temperature int -); -SELECT create_hypertable('conditions', 'time'); - create_hypertable --------------------------- - (10,public,conditions,t) -(1 row) - -CREATE MATERIALIZED VIEW conditions_summary_hourly_1 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - time_bucket('1 hour', "time") AS bucket, - SUM(temperature) AS temperature -FROM conditions -GROUP BY 1 -WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_daily_2 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - time_bucket('1 day', "bucket") AS bucket, - SUM(temperature) AS temperature -FROM conditions_summary_hourly_1 -GROUP BY 1 -WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_weekly_3 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - time_bucket('1 week', "bucket") AS bucket, - SUM(temperature) AS temperature -FROM conditions_summary_daily_2 -GROUP BY 1 -WITH NO DATA; -SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); - jsonb_pretty --------------- - 2 -(1 row) - -DROP VIEW relations; -DROP MATERIALIZED VIEW telemetry_report; -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -DROP DATABASE :DN_DBNAME_1 WITH (FORCE); -DROP DATABASE :DN_DBNAME_2 WITH (FORCE); diff --git a/tsl/test/expected/telemetry_stats-16.out b/tsl/test/expected/telemetry_stats-16.out deleted file mode 100644 index cf2aa3db3df..00000000000 --- a/tsl/test/expected/telemetry_stats-16.out +++ /dev/null @@ -1,1197 +0,0 @@ --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. ---telemetry tests that require a community license -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; --- function call info size is too variable for this test, so disable it -SET timescaledb.telemetry_level='no_functions'; -SELECT setseed(1); - setseed ---------- - -(1 row) - --- Create a materialized view from the telemetry report so that we --- don't regenerate telemetry for every query. Filter heap_size for --- materialized views since PG14 reports a different heap size for --- them compared to earlier PG versions. 
-CREATE MATERIALIZED VIEW telemetry_report AS -SELECT (r #- '{relations,materialized_views,heap_size}') AS r -FROM get_telemetry_report() r; -CREATE VIEW relations AS -SELECT r -> 'relations' AS rels -FROM telemetry_report; -SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, - rels -> 'hypertables' -> 'num_relations' AS num_hypertables -FROM relations; - num_continuous_aggs | num_hypertables ----------------------+----------------- - 0 | 0 -(1 row) - --- check telemetry picks up flagged content from metadata -SELECT r -> 'db_metadata' AS db_metadata -FROM telemetry_report; - db_metadata -------------- - {} -(1 row) - --- check timescaledb_telemetry.cloud -SELECT r -> 'instance_metadata' AS instance_metadata -FROM telemetry_report r; - instance_metadata -------------------- - {"cloud": "ci"} -(1 row) - -CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); -CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); -CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); -CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); -CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); -CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); -CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); -CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); -CREATE TABLE hyper (LIKE normal); -SELECT table_name FROM create_hypertable('hyper', 'time'); - table_name ------------- - hyper -(1 row) - -CREATE MATERIALIZED VIEW contagg -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - hyper -GROUP BY hour, device; -NOTICE: continuous aggregate "contagg" is already up-to-date -CREATE MATERIALIZED VIEW contagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - hyper -GROUP BY hour, device; -NOTICE: continuous aggregate "contagg_old" is already up-to-date --- Create another view (already have the "relations" view) -CREATE VIEW devices AS -SELECT DISTINCT ON (device) device -FROM hyper; --- Show relations with no data -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 0, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 0 + - }, + - "hypertables": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 8192, + - "num_children": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 0, + - 
"toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "continuous_aggregates": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "num_compressed_caggs": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- Insert data -INSERT INTO normal -SELECT t, ceil(random() * 10)::int, random() * 30 -FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; -INSERT INTO hyper -SELECT * FROM normal; -INSERT INTO part -SELECT * FROM normal; -CALL refresh_continuous_aggregate('contagg', NULL, NULL); -CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); --- ANALYZE to get updated reltuples stats -ANALYZE normal, hyper, part; -SELECT count(c) FROM show_chunks('hyper') c; - count -------- - 9 -(1 row) - -SELECT count(c) FROM show_chunks('contagg') c; - count -------- - 2 -(1 row) - -SELECT count(c) FROM show_chunks('contagg_old') c; - count -------- - 2 -(1 row) - --- Update and show the telemetry report -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 65536, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 697 + - }, + - "hypertables": { + - "heap_size": 73728, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 155648, + - "num_children": 9, + - 
"num_relations": 1, + - "num_reltuples": 697 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 98304, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "continuous_aggregates": { + - "heap_size": 188416, + - "toast_size": 16384, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "num_compressed_caggs": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0 + - }, + - "indexes_size": 229376, + - "num_children": 4, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 2 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- Actual row count should be the same as reltuples stats for all tables -SELECT (SELECT count(*) FROM normal) num_inserted_rows, - (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, - (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, - (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; - num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples --------------------+------------------+-----------------+---------------- - 697 | 697 | 697 | 697 -(1 row) - --- Add compression -ALTER TABLE hyper SET (timescaledb.compress); -SELECT compress_chunk(c) -FROM show_chunks('hyper') c ORDER BY c LIMIT 4; - compress_chunk ----------------------------------------- - _timescaledb_internal._hyper_1_1_chunk - _timescaledb_internal._hyper_1_2_chunk - _timescaledb_internal._hyper_1_3_chunk - _timescaledb_internal._hyper_1_4_chunk -(4 rows) - -ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); -NOTICE: defaulting compress_segmentby to device -NOTICE: defaulting compress_orderby to hour -SELECT compress_chunk(c) -FROM show_chunks('contagg') c ORDER BY c LIMIT 1; - compress_chunk ------------------------------------------ - _timescaledb_internal._hyper_2_10_chunk -(1 row) - --- Turn of real-time 
aggregation -ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); -ANALYZE normal, hyper, part; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT jsonb_pretty(rels) AS relations FROM relations; - relations ------------------------------------------------------ - { + - "views": { + - "num_relations": 2 + - }, + - "tables": { + - "heap_size": 65536, + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 2, + - "num_reltuples": 697 + - }, + - "hypertables": { + - "heap_size": 73728, + - "toast_size": 32768, + - "compression": { + - "compressed_heap_size": 32768, + - "compressed_row_count": 4, + - "compressed_toast_size": 32768, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 32768, + - "uncompressed_row_count": 284, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 65536, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 122880, + - "num_children": 9, + - "num_relations": 1, + - "num_reltuples": 413 + - }, + - "materialized_views": { + - "toast_size": 8192, + - "indexes_size": 0, + - "num_relations": 1, + - "num_reltuples": 0 + - }, + - "partitioned_tables": { + - "heap_size": 98304, + - "toast_size": 0, + - "indexes_size": 0, + - "num_children": 6, + - "num_relations": 1, + - "num_reltuples": 697 + - }, + - "continuous_aggregates": { + - "heap_size": 180224, + - "toast_size": 24576, + - "compression": { + - "compressed_heap_size": 40960, + - "compressed_row_count": 10, + - "num_compressed_caggs": 1, + - "compressed_toast_size": 8192, + - "num_compressed_chunks": 1, + - "uncompressed_heap_size": 49152, + - "uncompressed_row_count": 452, + - "compressed_indexes_size": 16384, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 81920 + - }, + - "indexes_size": 180224, + - "num_children": 4, + - "num_relations": 2, + - "num_reltuples": 0, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 1, + - "num_caggs_on_distributed_hypertables": 0, + - "num_caggs_using_real_time_aggregation": 1 + - }, + - "distributed_hypertables_data_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - }, + - "distributed_hypertables_access_node": { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } + - } -(1 row) - --- Add distributed hypertables -\set DN_DBNAME_1 :TEST_DBNAME _1 -\set DN_DBNAME_2 :TEST_DBNAME _2 --- Not an access node or data node -SELECT r -> 'num_data_nodes' AS num_data_nodes, - r -> 'distributed_member' AS distributed_member -FROM telemetry_report; - num_data_nodes | distributed_member 
-----------------+-------------------- - | "none" -(1 row) - --- Become an access node by adding a data node -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); - node_name | database | node_created | database_created | extension_created --------------+----------------------+--------------+------------------+------------------- - data_node_1 | db_telemetry_stats_1 | t | t | t -(1 row) - --- Telemetry should show one data node and "acces node" status -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT r -> 'num_data_nodes' AS num_data_nodes, - r -> 'distributed_member' AS distributed_member -FROM telemetry_report; - num_data_nodes | distributed_member -----------------+-------------------- - 1 | "access node" -(1 row) - --- See telemetry report from a data node -\ir include/remote_exec.sql --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license. -CREATE SCHEMA IF NOT EXISTS test; -psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping -GRANT USAGE ON SCHEMA test TO PUBLIC; -CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) -RETURNS VOID -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' -LANGUAGE C; -CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) -RETURNS TABLE("table_record" CSTRING[]) -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' -LANGUAGE C; -SELECT test.remote_exec(NULL, $$ - SELECT t -> 'num_data_nodes' AS num_data_nodes, - t -> 'distributed_member' AS distributed_member - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT t -> 'num_data_nodes' AS num_data_nodes, - t -> 'distributed_member' AS distributed_member - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -num_data_nodes|distributed_member ---------------+------------------ - |"data node" -(1 row) - - - remote_exec -------------- - -(1 row) - -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); - node_name | database | node_created | database_created | extension_created --------------+----------------------+--------------+------------------+------------------- - data_node_2 | db_telemetry_stats_2 | t | t | t -(1 row) - -CREATE TABLE disthyper (LIKE normal); -SELECT create_distributed_hypertable('disthyper', 'time', 'device'); - create_distributed_hypertable -------------------------------- - (6,public,disthyper,t) -(1 row) - --- Show distributed hypertables stats with no data -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 1, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - 
"num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- No datanode-related stats on the access node -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn -FROM relations; - distributed_hypertables_dn ------------------------------------------ - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0,+ - "num_compressed_hypertables": 0+ - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - } -(1 row) - --- Insert data into the distributed hypertable -INSERT INTO disthyper -SELECT * FROM normal; --- Update telemetry stats and show output on access node and data --- nodes. Note that the access node doesn't store data so shows --- zero. It should have stats from ANALYZE, though, like --- num_reltuples. -ANALYZE disthyper; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 18, + - "num_relations": 1, + - "num_reltuples": 697, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- Show data node stats -SELECT test.remote_exec(NULL, $$ - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -distributed_hypertables_dn --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -{ - "heap_size": 73728, - "toast_size": 0, - "compression": { - "compressed_heap_size": 0, - "compressed_row_count": 0, - "compressed_toast_size": 0, - "num_compressed_chunks": 0, - "uncompressed_heap_size": 0, - "uncompressed_row_count": 0, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 0, - "num_compressed_hypertables": 0 - }, - "indexes_size": 311296, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 368 -} -(1 row) - - -NOTICE: [data_node_2]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS 
distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_2]: -distributed_hypertables_dn --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -{ - "heap_size": 73728, - "toast_size": 0, - "compression": { - "compressed_heap_size": 0, - "compressed_row_count": 0, - "compressed_toast_size": 0, - "num_compressed_chunks": 0, - "uncompressed_heap_size": 0, - "uncompressed_row_count": 0, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 0, - "num_compressed_hypertables": 0 - }, - "indexes_size": 311296, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 329 -} -(1 row) - - - remote_exec -------------- - -(1 row) - --- Add compression -ALTER TABLE disthyper SET (timescaledb.compress); -SELECT compress_chunk(c) -FROM show_chunks('disthyper') c ORDER BY c LIMIT 4; - compress_chunk ----------------------------------------------- - _timescaledb_internal._dist_hyper_6_19_chunk - _timescaledb_internal._dist_hyper_6_20_chunk - _timescaledb_internal._dist_hyper_6_21_chunk - _timescaledb_internal._dist_hyper_6_22_chunk -(4 rows) - -ANALYZE disthyper; --- Update telemetry stats and show updated compression stats -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 0, + - "num_children": 18, + - "num_relations": 1, + - "num_reltuples": 581, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- Show data node stats -SELECT test.remote_exec(NULL, $$ - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -distributed_hypertables_dn ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -{ - "heap_size": 73728, - 
"toast_size": 16384, - "compression": { - "compressed_heap_size": 16384, - "compressed_row_count": 2, - "compressed_toast_size": 16384, - "num_compressed_chunks": 2, - "uncompressed_heap_size": 16384, - "uncompressed_row_count": 56, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 65536, - "num_compressed_hypertables": 1 - }, - "indexes_size": 278528, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 312 -} -(1 row) - - -NOTICE: [data_node_2]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_2]: -distributed_hypertables_dn ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -{ - "heap_size": 73728, - "toast_size": 16384, - "compression": { - "compressed_heap_size": 16384, - "compressed_row_count": 2, - "compressed_toast_size": 16384, - "num_compressed_chunks": 2, - "uncompressed_heap_size": 16384, - "uncompressed_row_count": 60, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 65536, - "num_compressed_hypertables": 1 - }, - "indexes_size": 278528, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 269 -} -(1 row) - - - remote_exec -------------- - -(1 row) - --- Create a replicated distributed hypertable and show replication stats -CREATE TABLE disthyper_repl (LIKE normal); -SELECT create_distributed_hypertable('disthyper_repl', 'time', 'device', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (7,public,disthyper_repl,t) -(1 row) - -INSERT INTO disthyper_repl -SELECT * FROM normal; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 0, + - "num_children": 36, + - "num_relations": 2, + - "num_reltuples": 581, + - "num_replica_chunks": 18, + - "num_replicated_distributed_hypertables": 1+ - } -(1 row) - --- Create a continuous aggregate on the distributed hypertable -CREATE MATERIALIZED VIEW distcontagg -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - disthyper -GROUP BY hour, device; -NOTICE: refreshing continuous aggregate "distcontagg" -CREATE MATERIALIZED VIEW distcontagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) 
AS hour, - device, - min(time) -FROM - disthyper -GROUP BY hour, device; -NOTICE: refreshing continuous aggregate "distcontagg_old" -VACUUM; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'continuous_aggregates') AS continuous_aggregates -FROM relations; - continuous_aggregates ------------------------------------------------- - { + - "heap_size": 425984, + - "toast_size": 40960, + - "compression": { + - "compressed_heap_size": 40960, + - "compressed_row_count": 10, + - "num_compressed_caggs": 1, + - "compressed_toast_size": 8192, + - "num_compressed_chunks": 1, + - "uncompressed_heap_size": 49152, + - "uncompressed_row_count": 452, + - "compressed_indexes_size": 16384, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 81920 + - }, + - "indexes_size": 409600, + - "num_children": 8, + - "num_relations": 4, + - "num_reltuples": 2336, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 2, + - "num_caggs_on_distributed_hypertables": 2,+ - "num_caggs_using_real_time_aggregation": 3+ - } -(1 row) - --- check telemetry for fixed schedule jobs works -create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ -begin -raise log 'this is job_test_fixed'; -end -$$; -create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ -begin -raise log 'this is job_test_drifting'; -end -$$; --- before adding the jobs -select get_telemetry_report()->'num_user_defined_actions_fixed'; - ?column? ----------- - 0 -(1 row) - -select get_telemetry_report()->'num_user_defined_actions'; - ?column? ----------- - 0 -(1 row) - -select add_job('job_test_fixed', '1 week'); - add_job ---------- - 1000 -(1 row) - -select add_job('job_test_drifting', '1 week', fixed_schedule => false); - add_job ---------- - 1001 -(1 row) - --- add continuous aggregate refresh policy for contagg -select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting - add_continuous_aggregate_policy ---------------------------------- - 1002 -(1 row) - -select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed - add_continuous_aggregate_policy ---------------------------------- - 1003 -(1 row) - --- add retention policy, fixed -select add_retention_policy('hyper', interval '1 year', initial_start => now()); - add_retention_policy ----------------------- - 1004 -(1 row) - --- add compression policy -select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); - add_compression_policy ------------------------- - 1005 -(1 row) - -select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; - uda_fixed | uda_drifting ------------+-------------- - 1 | 1 -(1 row) - -select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; - contagg_fixed | contagg_drifting ----------------+------------------ - 1 | 1 -(1 row) - -select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; - compress_fixed | retention_fixed -----------------+----------------- - 1 | 1 -(1 row) - -DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; -TRUNCATE _timescaledb_internal.job_errors; --- create some "errors" for testing -INSERT INTO -_timescaledb_config.bgw_job(id, application_name, 
schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name) -VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), -(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), -(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'), -(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'), -(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'), --- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions -(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'); --- create some errors for them -INSERT INTO -_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) -values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), -(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), -(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), -(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'), -(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'), -(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'), -(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'); --- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs -SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); - jsonb_pretty ----------------------------------------------- - { + - "policy_retention": { + - "P0001": 1 + - }, + - "policy_compression": { + - "JF009": 1 + - }, + - "user_defined_action": { + - "ABCDE": 1, + - "P0001": 2 + - }, + - "policy_refresh_continuous_aggregate": {+ - "P0001": 2 + - } + - } -(1 row) - --- for job statistics, insert some records into bgw_job_stats -INSERT INTO _timescaledb_internal.bgw_job_stat -values -(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval 
'00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), -(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, -false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); -SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); - jsonb_pretty ------------------------------------------------- - { + - "policy_retention": { + - "total_runs": 1, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 1, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 2 secs",+ - "max_consecutive_failures": 1 + - }, + - "policy_compression": { + - "total_runs": 1, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 1, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 2 secs",+ - "max_consecutive_failures": 1 + - }, + - "user_defined_action": { + - "total_runs": 2, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 2, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 4 secs",+ - "max_consecutive_failures": 1 + - }, + - "policy_refresh_continuous_aggregate": { + - "total_runs": 2, + - "total_crashes": 0, + - "total_duration": "@ 0", + - "total_failures": 2, + - "total_successes": 0, + - "max_consecutive_crashes": 0, + - "total_duration_failures": "@ 4 secs",+ - "max_consecutive_failures": 1 + - } + - } -(1 row) - --- create nested continuous aggregates - copied from cagg_on_cagg_common -CREATE TABLE conditions ( - time timestamptz NOT NULL, - temperature int -); -SELECT create_hypertable('conditions', 'time'); - create_hypertable --------------------------- - (10,public,conditions,t) -(1 row) - -CREATE MATERIALIZED VIEW conditions_summary_hourly_1 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - time_bucket('1 hour', "time") AS bucket, - SUM(temperature) AS temperature -FROM conditions -GROUP BY 1 -WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_daily_2 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - time_bucket('1 day', "bucket") AS bucket, - SUM(temperature) AS temperature -FROM conditions_summary_hourly_1 -GROUP BY 1 -WITH NO DATA; -CREATE MATERIALIZED VIEW conditions_summary_weekly_3 -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - time_bucket('1 week', "bucket") AS bucket, - SUM(temperature) AS temperature -FROM conditions_summary_daily_2 -GROUP BY 1 -WITH NO DATA; -SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); - jsonb_pretty --------------- - 2 -(1 row) - -DROP VIEW relations; -DROP MATERIALIZED VIEW telemetry_report; -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -DROP DATABASE :DN_DBNAME_1 WITH (FORCE); -DROP DATABASE :DN_DBNAME_2 WITH (FORCE); 
diff --git a/tsl/test/expected/telemetry_stats-13.out b/tsl/test/expected/telemetry_stats.out similarity index 62% rename from tsl/test/expected/telemetry_stats-13.out rename to tsl/test/expected/telemetry_stats.out index 095d92d7cd7..39f43f7e6eb 100644 --- a/tsl/test/expected/telemetry_stats-13.out +++ b/tsl/test/expected/telemetry_stats.out @@ -505,464 +505,6 @@ SELECT jsonb_pretty(rels) AS relations FROM relations; } (1 row) --- Add distributed hypertables -\set DN_DBNAME_1 :TEST_DBNAME _1 -\set DN_DBNAME_2 :TEST_DBNAME _2 --- Not an access node or data node -SELECT r -> 'num_data_nodes' AS num_data_nodes, - r -> 'distributed_member' AS distributed_member -FROM telemetry_report; - num_data_nodes | distributed_member -----------------+-------------------- - | "none" -(1 row) - --- Become an access node by adding a data node -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); - node_name | database | node_created | database_created | extension_created --------------+----------------------+--------------+------------------+------------------- - data_node_1 | db_telemetry_stats_1 | t | t | t -(1 row) - --- Telemetry should show one data node and "access node" status -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT r -> 'num_data_nodes' AS num_data_nodes, - r -> 'distributed_member' AS distributed_member -FROM telemetry_report; - num_data_nodes | distributed_member -----------------+-------------------- - 1 | "access node" -(1 row) - --- See telemetry report from a data node -\ir include/remote_exec.sql --- This file and its contents are licensed under the Timescale License. --- Please see the included NOTICE for copyright information and --- LICENSE-TIMESCALE for a copy of the license.
-CREATE SCHEMA IF NOT EXISTS test; -psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping -GRANT USAGE ON SCHEMA test TO PUBLIC; -CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) -RETURNS VOID -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' -LANGUAGE C; -CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) -RETURNS TABLE("table_record" CSTRING[]) -AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' -LANGUAGE C; -SELECT test.remote_exec(NULL, $$ - SELECT t -> 'num_data_nodes' AS num_data_nodes, - t -> 'distributed_member' AS distributed_member - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT t -> 'num_data_nodes' AS num_data_nodes, - t -> 'distributed_member' AS distributed_member - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -num_data_nodes|distributed_member ---------------+------------------ - |"data node" -(1 row) - - - remote_exec -------------- - -(1 row) - -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); - node_name | database | node_created | database_created | extension_created --------------+----------------------+--------------+------------------+------------------- - data_node_2 | db_telemetry_stats_2 | t | t | t -(1 row) - -CREATE TABLE disthyper (LIKE normal); -SELECT create_distributed_hypertable('disthyper', 'time', 'device'); - create_distributed_hypertable -------------------------------- - (6,public,disthyper,t) -(1 row) - --- Show distributed hypertables stats with no data -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 1, + - "num_reltuples": 0, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- No datanode-related stats on the access node -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn -FROM relations; - distributed_hypertables_dn ------------------------------------------ - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0,+ - "num_compressed_hypertables": 0+ - }, + - "indexes_size": 0, + - "num_children": 0, + - "num_relations": 0, + - "num_reltuples": 0 + - } -(1 row) - --- Insert data into the distributed hypertable -INSERT INTO disthyper -SELECT * FROM normal; --- Update telemetry stats and show output on access node and data --- nodes. Note that the access node doesn't store data so shows --- zero. It should have stats from ANALYZE, though, like --- num_reltuples. 
-ANALYZE disthyper; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 0, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 0 + - }, + - "indexes_size": 0, + - "num_children": 18, + - "num_relations": 1, + - "num_reltuples": 697, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- Show data node stats -SELECT test.remote_exec(NULL, $$ - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -distributed_hypertables_dn --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -{ - "heap_size": 73728, - "toast_size": 0, - "compression": { - "compressed_heap_size": 0, - "compressed_row_count": 0, - "compressed_toast_size": 0, - "num_compressed_chunks": 0, - "uncompressed_heap_size": 0, - "uncompressed_row_count": 0, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 0, - "num_compressed_hypertables": 0 - }, - "indexes_size": 311296, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 357 -} -(1 row) - - -NOTICE: [data_node_2]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_2]: -distributed_hypertables_dn --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -{ - "heap_size": 73728, - "toast_size": 0, - "compression": { - "compressed_heap_size": 0, - "compressed_row_count": 0, - "compressed_toast_size": 0, - "num_compressed_chunks": 0, - "uncompressed_heap_size": 0, - "uncompressed_row_count": 0, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 0, - "num_compressed_hypertables": 0 - }, - "indexes_size": 311296, - "num_children": 9, - "num_relations": 1, - 
"num_reltuples": 340 -} -(1 row) - - - remote_exec -------------- - -(1 row) - --- Add compression -ALTER TABLE disthyper SET (timescaledb.compress); -SELECT compress_chunk(c) -FROM show_chunks('disthyper') c ORDER BY c LIMIT 4; - compress_chunk ----------------------------------------------- - _timescaledb_internal._dist_hyper_6_19_chunk - _timescaledb_internal._dist_hyper_6_20_chunk - _timescaledb_internal._dist_hyper_6_21_chunk - _timescaledb_internal._dist_hyper_6_22_chunk -(4 rows) - -ANALYZE disthyper; --- Update telemetry stats and show updated compression stats -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 0, + - "num_children": 18, + - "num_relations": 1, + - "num_reltuples": 581, + - "num_replica_chunks": 0, + - "num_replicated_distributed_hypertables": 0+ - } -(1 row) - --- Show data node stats -SELECT test.remote_exec(NULL, $$ - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t; -$$); -NOTICE: [data_node_1]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_1]: -distributed_hypertables_dn ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -{ - "heap_size": 73728, - "toast_size": 16384, - "compression": { - "compressed_heap_size": 16384, - "compressed_row_count": 2, - "compressed_toast_size": 16384, - "num_compressed_chunks": 2, - "uncompressed_heap_size": 16384, - "uncompressed_row_count": 72, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 65536, - "num_compressed_hypertables": 1 - }, - "indexes_size": 278528, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 285 -} -(1 row) - - -NOTICE: [data_node_2]: - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t -NOTICE: [data_node_2]: -distributed_hypertables_dn 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -{ - "heap_size": 73728, - "toast_size": 16384, - "compression": { - "compressed_heap_size": 16384, - "compressed_row_count": 2, - "compressed_toast_size": 16384, - "num_compressed_chunks": 2, - "uncompressed_heap_size": 16384, - "uncompressed_row_count": 44, - "compressed_indexes_size": 0, - "uncompressed_toast_size": 0, - "uncompressed_indexes_size": 65536, - "num_compressed_hypertables": 1 - }, - "indexes_size": 278528, - "num_children": 9, - "num_relations": 1, - "num_reltuples": 296 -} -(1 row) - - - remote_exec -------------- - -(1 row) - --- Create a replicated distributed hypertable and show replication stats -CREATE TABLE disthyper_repl (LIKE normal); -SELECT create_distributed_hypertable('disthyper_repl', 'time', 'device', replication_factor => 2); - create_distributed_hypertable -------------------------------- - (7,public,disthyper_repl,t) -(1 row) - -INSERT INTO disthyper_repl -SELECT * FROM normal; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - distributed_hypertables_an -------------------------------------------------- - { + - "heap_size": 0, + - "toast_size": 0, + - "compression": { + - "compressed_heap_size": 0, + - "compressed_row_count": 0, + - "compressed_toast_size": 0, + - "num_compressed_chunks": 4, + - "uncompressed_heap_size": 0, + - "uncompressed_row_count": 0, + - "compressed_indexes_size": 0, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 0, + - "num_compressed_hypertables": 1 + - }, + - "indexes_size": 0, + - "num_children": 36, + - "num_relations": 2, + - "num_reltuples": 581, + - "num_replica_chunks": 18, + - "num_replicated_distributed_hypertables": 1+ - } -(1 row) - --- Create a continuous aggregate on the distributed hypertable -CREATE MATERIALIZED VIEW distcontagg -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - disthyper -GROUP BY hour, device; -NOTICE: refreshing continuous aggregate "distcontagg" -CREATE MATERIALIZED VIEW distcontagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - disthyper -GROUP BY hour, device; -NOTICE: refreshing continuous aggregate "distcontagg_old" -VACUUM; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'continuous_aggregates') AS continuous_aggregates -FROM relations; - continuous_aggregates ------------------------------------------------- - { + - "heap_size": 425984, + - "toast_size": 40960, + - "compression": { + - "compressed_heap_size": 40960, + - "compressed_row_count": 10, + - "num_compressed_caggs": 1, + - "compressed_toast_size": 8192, + - "num_compressed_chunks": 1, + - "uncompressed_heap_size": 49152, + - "uncompressed_row_count": 452, + - "compressed_indexes_size": 
16384, + - "uncompressed_toast_size": 0, + - "uncompressed_indexes_size": 81920 + - }, + - "indexes_size": 409600, + - "num_children": 8, + - "num_relations": 4, + - "num_reltuples": 2336, + - "num_caggs_nested": 0, + - "num_caggs_finalized": 2, + - "num_caggs_on_distributed_hypertables": 2,+ - "num_caggs_using_real_time_aggregation": 3+ - } -(1 row) - -- check telemetry for fixed schedule jobs works create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ begin @@ -1155,9 +697,9 @@ CREATE TABLE conditions ( temperature int ); SELECT create_hypertable('conditions', 'time'); - create_hypertable --------------------------- - (10,public,conditions,t) + create_hypertable +------------------------- + (6,public,conditions,t) (1 row) CREATE MATERIALIZED VIEW conditions_summary_hourly_1 @@ -1192,6 +734,3 @@ SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggrega DROP VIEW relations; DROP MATERIALIZED VIEW telemetry_report; -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -DROP DATABASE :DN_DBNAME_1 WITH (FORCE); -DROP DATABASE :DN_DBNAME_2 WITH (FORCE); diff --git a/tsl/test/isolation/expected/dist_ha_chunk_drop.out b/tsl/test/isolation/expected/dist_ha_chunk_drop.out index 8197955519c..47fc24de146 100644 --- a/tsl/test/isolation/expected/dist_ha_chunk_drop.out +++ b/tsl/test/isolation/expected/dist_ha_chunk_drop.out @@ -27,6 +27,8 @@ t (1 row) step s1_init: INSERT INTO metric1(ts, val, dev_id) SELECT s.*, 3.14, d.* FROM generate_series('2021-08-17 00:00:00'::timestamp, '2021-08-17 00:00:59'::timestamp, '1 s'::interval) s CROSS JOIN generate_series(1, 500) d; +s1: WARNING: altering data node is deprecated +DETAIL: Multi-node is deprecated and will be removed in future releases. step s1_set_unavailable: SELECT alter_data_node('data_node_4', available=>false); alter_data_node -------------------------------------- @@ -49,6 +51,8 @@ debug_waitpoint_release step s1_insert: <... completed> step s2_insert: <... completed> +s1: WARNING: altering data node is deprecated +DETAIL: Multi-node is deprecated and will be removed in future releases. 
step s1_set_available: SELECT alter_data_node('data_node_4', available=>true); alter_data_node -------------------------------------- diff --git a/tsl/test/isolation/specs/CMakeLists.txt b/tsl/test/isolation/specs/CMakeLists.txt index 766f5d22ca5..6868629e72e 100644 --- a/tsl/test/isolation/specs/CMakeLists.txt +++ b/tsl/test/isolation/specs/CMakeLists.txt @@ -4,7 +4,6 @@ set(TEST_TEMPLATES_MODULE reorder_deadlock.spec.in set(TEST_TEMPLATES_MODULE_DEBUG reorder_vs_insert.spec.in reorder_vs_select.spec.in - dist_su_copy_chunk.spec.in dist_cmd_exec.spec.in decompression_chunk_and_parallel_query.in) list( @@ -18,7 +17,7 @@ list( deadlock_drop_chunks_compress.spec osm_range_updates_iso.spec) -if(ENABLE_MULTINODE_TESTS) +if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") list(APPEND TEST_FILES cagg_concurrent_refresh_dist_ht.spec) endif() @@ -28,7 +27,7 @@ endif() if(CMAKE_BUILD_TYPE MATCHES Debug) list(APPEND TEST_TEMPLATES_MODULE ${TEST_TEMPLATES_MODULE_DEBUG}) - list(APPEND TEST_FILES cagg_drop_chunks_iso.spec compression_chunk_race.spec + list(APPEND TEST_FILES compression_chunk_race.spec compression_merge_race.spec decompression_chunk_and_parallel_query_wo_idx.spec) if(PG_VERSION VERSION_GREATER_EQUAL "14.0") @@ -43,15 +42,18 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) list(APPEND TEST_FILES deadlock_recompress_chunk.spec) endif() - if(ENABLE_MULTINODE_TESTS) + if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") list( APPEND TEST_FILES cagg_multi_dist_ht.spec - dist_ha_chunk_drop.spec + cagg_drop_chunks_iso.spec dist_ha_chunk_drop.spec dist_restore_point.spec remote_create_chunk.spec) + + list(APPEND TEST_TEMPLATES_MODULE_DEBUG dist_cmd_exec.spec.in + dist_su_copy_chunk.spec.in) endif() endif(CMAKE_BUILD_TYPE MATCHES Debug) diff --git a/tsl/test/isolation/specs/dist_restore_point.spec b/tsl/test/isolation/specs/dist_restore_point.spec index a9593050466..8688f55d095 100644 --- a/tsl/test/isolation/specs/dist_restore_point.spec +++ b/tsl/test/isolation/specs/dist_restore_point.spec @@ -40,6 +40,7 @@ setup { SET TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name = 's1'; + SET client_min_messages = 'ERROR'; } step "s1_create_dist_rp" { SELECT restore_point > pg_lsn('0/0') as valid_lsn FROM create_distributed_restore_point('s1_test'); } @@ -49,7 +50,7 @@ setup { SET TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name = 's2'; - SET client_min_messages TO warning; + SET client_min_messages = 'ERROR'; } step "s2_create_dist_rp" { SELECT restore_point > pg_lsn('0/0') as valid_lsn FROM create_distributed_restore_point('s2_test'); } step "s2_insert" { INSERT INTO disttable VALUES ('2019-08-02 10:45', 0, 0.0); } @@ -68,6 +69,7 @@ setup { SET TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name = 's3'; + SET client_min_messages = 'ERROR'; } step "s3_lock_enable" { SELECT debug_waitpoint_enable('create_distributed_restore_point_lock'); } step "s3_lock_release" { SELECT debug_waitpoint_release('create_distributed_restore_point_lock'); } diff --git a/tsl/test/shared/expected/gapfill.out b/tsl/test/shared/expected/gapfill-13.out similarity index 100% rename from tsl/test/shared/expected/gapfill.out rename to tsl/test/shared/expected/gapfill-13.out diff --git a/tsl/test/shared/expected/gapfill-14.out b/tsl/test/shared/expected/gapfill-14.out new file mode 100644 index 00000000000..d2e2e4ec598 --- /dev/null +++ b/tsl/test/shared/expected/gapfill-14.out @@ -0,0 +1,3366 @@ +-- This file and its contents are licensed under the Timescale License. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set EXPLAIN 'EXPLAIN (COSTS OFF)' +-- we want to see error details in the output +\set VERBOSITY default +CREATE TABLE gapfill_plan_test(time timestamptz NOT NULL, value float); +SELECT table_name FROM create_hypertable('gapfill_plan_test','time',chunk_time_interval=>'4 weeks'::interval); + table_name + gapfill_plan_test +(1 row) + +INSERT INTO gapfill_plan_test SELECT generate_series('2018-01-01'::timestamptz,'2018-04-01'::timestamptz,'1m'::interval), 1.0; +-- simple example +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Values Scan on "*VALUES*" +(6 rows) + +-- test sorting +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1 +ORDER BY 2; +QUERY PLAN + Sort + Sort Key: (avg("*VALUES*".column2)) + -> Custom Scan (GapFill) + -> GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Values Scan on "*VALUES*" +(8 rows) + +-- test sort direction +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1 +ORDER BY 1 DESC; +QUERY PLAN + Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) DESC + -> Custom Scan (GapFill) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) NULLS FIRST + -> HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now()) + -> Values Scan on "*VALUES*" +(8 rows) + +-- test order by aggregate function +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1 +ORDER BY 2,1; +QUERY PLAN + Sort + Sort Key: (avg("*VALUES*".column2)), (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Custom Scan (GapFill) + -> GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Values Scan on "*VALUES*" +(8 rows) + +-- test query without order by +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Values Scan on "*VALUES*" +(6 rows) + +-- test parallel query +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + avg(value) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Finalize GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, 
_hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Partial HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Parallel Append + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk +(15 rows) + +-- test parallel query with locf +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + locf(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Finalize GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Partial HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Parallel Append + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk +(15 rows) + +-- test parallel query with interpolate +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + interpolate(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Finalize GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Partial HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Parallel Append + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk +(15 rows) + +-- make sure we can run gapfill in parallel workers +-- ensure this plan runs in parallel +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + interpolate(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 2 +LIMIT 1; +QUERY PLAN + Limit + -> Sort + Sort Key: (interpolate(avg(gapfill_plan_test.value), NULL::record, NULL::record)) + -> Custom Scan (GapFill) + -> Finalize GroupAggregate + Group 
Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Partial HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Parallel Append + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk +(18 rows) + +-- actually run a parallel gapfill +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + interpolate(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 2 +LIMIT 1; + time_bucket_gapfill | interpolate +------------------------------+------------- + Mon Jan 01 00:00:00 2018 PST | 1 +(1 row) + +-- test sort optimizations +-- test sort optimization with single member order by, +-- should use index scan (no GapFill node for this one since we're not gapfilling) +:EXPLAIN SELECT time_bucket_gapfill('5m',time),value +FROM gapfill_plan_test +ORDER BY 1; +QUERY PLAN + Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(8 rows) + +SET max_parallel_workers_per_gather TO 0; +-- test sort optimizations with locf +:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), locf(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(11 rows) + +-- test sort optimizations with interpolate +:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), interpolate(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(11 rows) + +RESET max_parallel_workers_per_gather; +CREATE INDEX 
gapfill_plan_test_indx ON gapfill_plan_test(value, time); +-- test sort optimization with ordering by multiple columns and time_bucket_gapfill not last, +-- must not use index scan +:EXPLAIN SELECT time_bucket_gapfill('5m',time),value +FROM gapfill_plan_test +ORDER BY 1,2; +QUERY PLAN + Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)), _hyper_X_X_chunk.value + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(8 rows) + +-- test sort optimization with ordering by multiple columns and time_bucket as last member, +-- should use index scan +:EXPLAIN SELECT time_bucket_gapfill('5m',time),value +FROM gapfill_plan_test +ORDER BY 2,1; +QUERY PLAN + Sort + Sort Key: _hyper_X_X_chunk.value, (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(8 rows) + +\set METRICS metrics_int +-- All test against table :METRICS first +\set ON_ERROR_STOP 0 +-- inverse of previous test query to confirm an error is actually thrown +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3 +FROM :METRICS m1 +WHERE time = 5 +GROUP BY 1,2,3 ORDER BY 2,3,1; +ERROR: division by zero +-- test window functions with multiple column references +SELECT + time_bucket_gapfill(1,time,1,2), + first(min(time),min(time)) OVER () +FROM :METRICS +GROUP BY 1; +ERROR: window functions with multiple column references not supported +-- test with unsupported operator +SELECT + time_bucket_gapfill(1,time) +FROM :METRICS +WHERE time =0 AND time < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- test with 2 tables and where clause doesnt match gapfill argument +SELECT + time_bucket_gapfill(1,m2.time) +FROM :METRICS m, :METRICS m2 +WHERE m.time >=0 AND m.time < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- test inner join and where clause doesnt match gapfill argument +SELECT + time_bucket_gapfill(1,m2.time) +FROM :METRICS m1 INNER JOIN :METRICS m2 ON m1.time=m2.time +WHERE m1.time >=0 AND m1.time < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- test outer join with constraints in join condition +-- not usable as start/stop +SELECT + time_bucket_gapfill(1,m1.time) +FROM :METRICS m1 LEFT OUTER JOIN :METRICS m2 ON m1.time=m2.time AND m1.time >=0 AND m1.time < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +\set ON_ERROR_STOP 1 +\ir include/gapfill_metrics_query.sql +-- This file and its contents are licensed under the Timescale License. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- test locf lookup query does not trigger when not needed +-- 1/(SELECT 0) will throw an error in the lookup query but in order to not +-- always trigger evaluation it needs to be correlated otherwise postgres will +-- always run it once even if the value is never used +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3 +FROM :METRICS m1 +WHERE time >= 0 AND time < 5 +GROUP BY 1,2,3 ORDER BY 2,3,1; + time | device_id | sensor_id | locf3 +------+-----------+-----------+------- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +-- test locf with correlated subquery +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + avg(value), + locf(min(value)) AS locf, + locf(min(value)::int,23) AS locf1, + locf(min(value)::int,(SELECT 42)) AS locf2, + locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3 +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 2,3,1; + time | device_id | sensor_id | avg | locf | locf1 | locf2 | locf3 +------+-----------+-----------+-----+------+-------+-------+------- + 0 | 1 | 1 | 5 | 5 | 5 | 5 | 5 + 5 | 1 | 1 | | 5 | 5 | 5 | 5 + 10 | 1 | 1 | | 5 | 5 | 5 | 5 + 0 | 1 | 2 | | | 23 | 42 | -100 + 5 | 1 | 2 | 10 | 10 | 10 | 10 | 10 + 10 | 1 | 2 | | 10 | 10 | 10 | 10 +(6 rows) + +-- test locf with correlated subquery and "wrong order" +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + avg(value), + locf(min(value)) AS locf, + locf(min(value),23::float) AS locf1, + locf(min(value),(SELECT 42::float)) AS locf2, + locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3 +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 1,2,3; + time | device_id | sensor_id | avg | locf | locf1 | locf2 | locf3 +------+-----------+-----------+-----+------+-------+-------+------- + 0 | 1 | 1 | 5 | 5 | 5 | 5 | 5 + 0 | 1 | 2 | | | 23 | 42 | -100 + 5 | 1 | 1 | | 5 | 5 | 5 | 5 + 5 | 1 | 2 | 10 | 10 | 10 | 10 | 10 + 10 | 1 | 1 | | 5 | 5 | 5 | 5 + 10 | 1 | 2 | | 10 | 10 | 10 | 10 +(6 rows) + +-- test locf with correlated subquery and window functions +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)), + sum(locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1))) OVER (PARTITION BY device_id, sensor_id ROWS 1 PRECEDING) +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3; + time | device_id | sensor_id | locf | sum +------+-----------+-----------+------+------ + 0 | 1 | 1 | 5 | 5 + 5 | 1 | 1 | 5 | 10 + 10 | 1 | 1 | 5 | 10 + 0 | 1 | 2 | -100 | -100 + 5 | 1 | 2 | 10 | -90 + 10 | 1 | 2 | 10 | 20 +(6 rows) + +-- test JOINs +SELECT + time_bucket_gapfill(1,time,0,5) as time, + device_id, + d.name, + sensor_id, + s.name, + avg(m.value) +FROM :METRICS m +INNER JOIN devices d USING(device_id) +INNER JOIN sensors s 
USING(sensor_id) +WHERE time BETWEEN 0 AND 5 +GROUP BY 1,2,3,4,5; + time | device_id | name | sensor_id | name | avg +------+-----------+----------+-----------+----------+----- + 0 | 1 | Device 1 | 1 | Sensor 1 | 5 + 1 | 1 | Device 1 | 1 | Sensor 1 | + 2 | 1 | Device 1 | 1 | Sensor 1 | + 3 | 1 | Device 1 | 1 | Sensor 1 | + 4 | 1 | Device 1 | 1 | Sensor 1 | + 0 | 1 | Device 1 | 2 | Sensor 2 | + 1 | 1 | Device 1 | 2 | Sensor 2 | + 2 | 1 | Device 1 | 2 | Sensor 2 | + 3 | 1 | Device 1 | 2 | Sensor 2 | + 4 | 1 | Device 1 | 2 | Sensor 2 | + 5 | 1 | Device 1 | 2 | Sensor 2 | 10 +(11 rows) + +-- test interpolate with correlated subquery +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + avg(value), + interpolate(min(value)) AS ip, + interpolate(min(value),(-5,-5.0::float),(15,20.0::float)) AS ip1, + interpolate(min(value),(SELECT (-10,-10.0::float)),(SELECT (15,20.0::float))) AS ip2, + interpolate( + min(value), + (SELECT (time,value) FROM :METRICS m2 + WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time DESC LIMIT 1), + (SELECT (time,value) FROM :METRICS m2 + WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time LIMIT 1) + ) AS ip3 +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 2,3,1; + time | device_id | sensor_id | avg | ip | ip1 | ip2 | ip3 +------+-----------+-----------+-----+----+-----+------------------+------------------ + 0 | 1 | 1 | 5 | 5 | 5 | 5 | 5 + 5 | 1 | 1 | | | 10 | 10 | 4.75 + 10 | 1 | 1 | | | 15 | 15 | 4.5 + 0 | 1 | 2 | | | 2.5 | 3.33333333333333 | 4.76190476190476 + 5 | 1 | 2 | 10 | 10 | 10 | 10 | 10 + 10 | 1 | 2 | | | 15 | 15 | 4.21052631578947 +(6 rows) + +-- test interpolate with correlated subquery and window function +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + interpolate( + min(value), + (SELECT (time,value) FROM :METRICS m2 + WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time DESC LIMIT 1), + (SELECT (time,value) FROM :METRICS m2 + WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time LIMIT 1) + ), + sum(interpolate( + min(value), + (SELECT (time,value) FROM :METRICS m2 + WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time DESC LIMIT 1), + (SELECT (time,value) FROM :METRICS m2 + WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time LIMIT 1) + )) OVER (PARTITION BY device_id, sensor_id ROWS 1 PRECEDING) +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 2,3,1; + time | device_id | sensor_id | interpolate | sum +------+-----------+-----------+------------------+------------------ + 0 | 1 | 1 | 5 | 5 + 5 | 1 | 1 | 4.75 | 9.75 + 10 | 1 | 1 | 4.5 | 9.25 + 0 | 1 | 2 | 4.76190476190476 | 4.76190476190476 + 5 | 1 | 2 | 10 | 14.7619047619048 + 10 | 1 | 2 | 4.21052631578947 | 14.2105263157895 +(6 rows) + +-- test subqueries +-- subqueries will alter the shape of the plan and top-level constraints +-- might not end up in top-level of jointree +SELECT + time_bucket_gapfill(1,m1.time) +FROM :METRICS m1 +WHERE m1.time >=0 AND m1.time < 2 AND device_id IN (SELECT device_id FROM :METRICS) +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test inner join with constraints in join condition +SELECT + time_bucket_gapfill(1,m2.time) +FROM :METRICS m1 INNER JOIN :METRICS m2 ON m1.time=m2.time AND m2.time >=0 AND m2.time < 2 +GROUP BY 1; + 
time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test actual table +SELECT + time_bucket_gapfill(1,time) +FROM :METRICS +WHERE time >=0 AND time < 2 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test with table alias +SELECT + time_bucket_gapfill(1,time) +FROM :METRICS m +WHERE m.time >=0 AND m.time < 2 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test with 2 tables +SELECT + time_bucket_gapfill(1,m.time) +FROM :METRICS m, :METRICS m2 +WHERE m.time >=0 AND m.time < 2 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test prepared statement with locf with lookup query +PREPARE prep_gapfill AS +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) +FROM :METRICS m1 +WHERE time >= 0 AND time < 5 +GROUP BY 1,2,3; +-- execute 10 times to make sure turning it into generic plan works +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +DEALLOCATE prep_gapfill; +-- test prepared statement with interpolate with lookup query +PREPARE prep_gapfill AS +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + interpolate( + min(value), + (SELECT (time,value) FROM :METRICS m2 + WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time DESC LIMIT 1), + (SELECT (time,value) FROM :METRICS m2 + WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time LIMIT 1) + ) +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 2,3,1; +-- execute 10 times to make sure turning it into generic plan works +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; 
+ time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +DEALLOCATE prep_gapfill; +-- test prepared statement with variable gapfill arguments +PREPARE prep_gapfill(int,int,int) AS +SELECT + time_bucket_gapfill($1,time,$2,$3) AS time, + device_id, + sensor_id, + min(value) +FROM :METRICS m1 +WHERE time >= $2 AND time < $3 AND device_id=1 AND sensor_id=1 +GROUP BY 1,2,3 ORDER BY 2,3,1; +-- execute 10 times to make sure turning it into generic plan works +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + 
+EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +DEALLOCATE prep_gapfill; +-- Tests without tables +-- test locf and interpolate call without gapfill +SELECT locf(1); + locf + 1 +(1 row) + +SELECT interpolate(1); + interpolate + 1 +(1 row) + +-- test locf and interpolate call with NULL input +SELECT locf(NULL::int); + locf + +(1 row) + +SELECT interpolate(NULL::bigint); + interpolate + +(1 row) + +\set ON_ERROR_STOP 0 +-- test time_bucket_gapfill not top level function call +SELECT + 1 + time_bucket_gapfill(1,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: no top level time_bucket_gapfill in group by clause +-- test locf with treat_null_as_missing not BOOL +SELECT + time_bucket_gapfill(1,time,1,11), + locf(min(time),treat_null_as_missing:=1) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: function locf(integer, treat_null_as_missing => integer) does not exist +LINE 3: locf(min(time),treat_null_as_missing:=1) + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +-- test locf with treat_null_as_missing not literal +SELECT + time_bucket_gapfill(1,time,1,11), + locf(min(time),treat_null_as_missing:=random()>0) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid locf argument: treat_null_as_missing must be a BOOL literal +-- test interpolate lookup query with 1 element in record +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),next=>(SELECT ROW(10))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: interpolate RECORD arguments must have 2 elements +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),prev=>(SELECT ROW(10))) +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: interpolate RECORD arguments must have 2 elements +-- test interpolate lookup query with 3 elements in record +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),next=>(SELECT (10,10,10))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: interpolate RECORD arguments must have 2 elements +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),prev=>(SELECT (10,10,10))) +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: interpolate RECORD arguments must have 2 elements +-- test interpolate lookup query with mismatching time datatype +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),next=>(SELECT (10::float,10))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: first argument of interpolate returned record must match used timestamp datatype +DETAIL: Returned type double precision does not match expected type integer. 
+SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),prev=>(SELECT (10::float,10))) +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: first argument of interpolate returned record must match used timestamp datatype +DETAIL: Returned type double precision does not match expected type integer. +-- test interpolate lookup query with mismatching value datatype +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),next=>(SELECT (10,10::float))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: second argument of interpolate returned record must match used interpolate datatype +DETAIL: Returned type double precision does not match expected type integer. +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),prev=>(SELECT (10,10::float))) +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: second argument of interpolate returned record must match used interpolate datatype +DETAIL: Returned type double precision does not match expected type integer. +-- test interpolate with unsupported datatype +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(text 'text') +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: function interpolate(text) does not exist +LINE 3: interpolate(text 'text') + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(interval '1d') +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: function interpolate(interval) does not exist +LINE 3: interpolate(interval '1d') + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +-- test multiple time_bucket_gapfill calls +SELECT + time_bucket_gapfill(1,time,1,11),time_bucket_gapfill(1,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple time_bucket_gapfill calls not allowed +-- test nested time_bucket_gapfill calls +SELECT + time_bucket_gapfill(1,time_bucket_gapfill(1,time,1,11),1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple time_bucket_gapfill calls not allowed +-- test nested locf calls +SELECT + time_bucket_gapfill(1,time,1,11), + locf(locf(min(time))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple interpolate/locf function calls per resultset column not supported +-- test nested interpolate calls +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(interpolate(min(time))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple interpolate/locf function calls per resultset column not supported +-- test mixed locf/interpolate calls +SELECT + time_bucket_gapfill(1,time,1,11), + locf(interpolate(min(time))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple interpolate/locf function calls per resultset column not supported +-- test window function inside locf +SELECT + time_bucket_gapfill(1,time,1,11), + locf(avg(min(time)) OVER ()) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: window functions must not be below locf +-- test nested window functions +-- prevented by postgres +SELECT + time_bucket_gapfill(1,time,1,11), + avg(avg(min(time)) OVER ()) OVER () +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: window function calls cannot be nested +LINE 3: avg(avg(min(time)) OVER ()) OVER () + ^ +-- test multiple window functions in single column +SELECT + time_bucket_gapfill(1,time,1,11), + avg(min(time)) OVER () + avg(min(time)) OVER () +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple window function calls per column not supported +-- 
test locf not toplevel +SELECT + time_bucket_gapfill(1,time,1,11), + 1 + locf(min(time)) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: locf must be toplevel function call +-- test locf inside aggregate +SELECT + time_bucket_gapfill(1,time,1,11), + min(min(locf(time))) OVER () +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: aggregate functions must be below locf +-- test NULL args +SELECT + time_bucket_gapfill(NULL,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width cannot be NULL +SELECT + time_bucket_gapfill(1,NULL,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts cannot be NULL +SELECT + time_bucket_gapfill(1,time,NULL,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +SELECT + time_bucket_gapfill(1,time,1,NULL) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer finish from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +SELECT + time_bucket_gapfill(NULL,time,'Europe/Berlin','2000-06-01','2001-06-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2001-01-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width cannot be NULL +SELECT + time_bucket_gapfill('1day',NULL,'Europe/Berlin','2000-06-01','2001-06-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2001-01-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts cannot be NULL +SELECT + time_bucket_gapfill('1day',time,NULL,'2000-06-01','2001-06-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2001-01-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: timezone cannot be NULL +-- test 0 bucket_width +SELECT + time_bucket_gapfill(0,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +SELECT + time_bucket_gapfill('0d',time,'2000-01-01','2000-02-01') +FROM (VALUES ('2000-01-01'::date),('2000-02-01'::date)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +SELECT + time_bucket_gapfill('0d',time,'2000-01-01','2000-02-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2000-02-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +-- test negative bucket_width +SELECT + time_bucket_gapfill(-1,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +SELECT + time_bucket_gapfill('-1d',time,'2000-01-01','2000-02-01') +FROM (VALUES ('2000-01-01'::date),('2000-02-01'::date)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +SELECT + time_bucket_gapfill('-1d',time,'2000-01-01','2000-02-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2000-02-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +-- test subqueries as interval, start and stop (not supported atm) +SELECT + time_bucket_gapfill((SELECT 1),time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be a simple expression +SELECT + time_bucket_gapfill(1,time,(SELECT 
1),11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start must be a simple expression +SELECT + time_bucket_gapfill(1,time,1,(SELECT 11)) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: finish must be a simple expression +\set ON_ERROR_STOP 1 +-- test time_bucket_gapfill without aggregation +-- this will not trigger gapfilling +SELECT + time_bucket_gapfill(1,time,1,11) +FROM (VALUES (1),(2)) v(time); + time_bucket_gapfill + 1 + 2 +(2 rows) + +SELECT + time_bucket_gapfill(1,time,1,11), + avg(time) OVER () +FROM (VALUES (1),(2)) v(time); + time_bucket_gapfill | avg +---------------------+-------------------- + 1 | 1.5000000000000000 + 2 | 1.5000000000000000 +(2 rows) + +-- test int int2/4/8 +SELECT + time_bucket_gapfill(1::int2,time::int2,0::int2,6::int2) +FROM (VALUES (1),(4)) v(time) +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 + 3 + 4 + 5 +(6 rows) + +SELECT + time_bucket_gapfill(1::int4,time::int4,0::int4,6::int4) +FROM (VALUES (1),(4)) v(time) +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 + 3 + 4 + 5 +(6 rows) + +SELECT + time_bucket_gapfill(1::int8,time::int8,0::int8,6::int8) +FROM (VALUES (1),(4)) v(time) +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 + 3 + 4 + 5 +(6 rows) + +-- test non-aligned bucket start +SELECT + time_bucket_gapfill(10,time,5,40) +FROM (VALUES (11),(22)) v(time) +GROUP BY 1; + time_bucket_gapfill + 0 + 10 + 20 + 30 +(4 rows) + +-- simple gapfill query +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + min(value) AS value +FROM (values (-10,1),(10,2),(11,3),(12,4),(22,5),(30,6),(66,7)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + -10 | 1 + 0 | + 10 | 2 + 20 | 5 + 30 | 6 + 40 | + 60 | 7 +(7 rows) + +-- test references to different columns +SELECT + time_bucket_gapfill(1,t,0,5) as t, + min(t),max(t),min(v),max(v) +FROM(VALUES (1,3),(2,5)) tb(t,v) +GROUP BY 1 ORDER BY 1; + t | min | max | min | max +---+-----+-----+-----+----- + 0 | | | | + 1 | 1 | 1 | 3 | 3 + 2 | 2 | 2 | 5 | 5 + 3 | | | | + 4 | | | | +(5 rows) + +-- test passing of values outside boundaries +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) +FROM (VALUES (-1),(1),(3),(6)) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +---------------------+----- + -1 | -1 + 0 | + 1 | 1 + 2 | + 3 | 3 + 4 | + 6 | 6 +(7 rows) + +-- test gap fill before first row and after last row +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) +FROM (VALUES (1),(2),(3)) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +---------------------+----- + 0 | + 1 | 1 + 2 | 2 + 3 | 3 + 4 | +(5 rows) + +-- test gap fill without rows in resultset +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) +FROM (VALUES (1),(2),(3)) v(time) +WHERE false +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +---------------------+----- + 0 | + 1 | + 2 | + 3 | + 4 | +(5 rows) + +-- test coalesce +SELECT + time_bucket_gapfill(1,time,0,5), + coalesce(min(time),0), + coalesce(min(value),0), + coalesce(min(value),7) +FROM (VALUES (1,1),(2,2),(3,3)) v(time,value) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | coalesce | coalesce | coalesce +---------------------+----------+----------+---------- + 0 | 0 | 0 | 7 + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 + 4 | 0 | 0 | 7 +(5 rows) + +-- test case +SELECT + time_bucket_gapfill(1,time,0,5), + min(time), + CASE WHEN min(time) IS NOT NULL THEN min(time) ELSE -1 END, + CASE WHEN min(time) IS NOT NULL THEN min(time) + 7 ELSE 0 END, + CASE WHEN 1 = 1 THEN 1 
ELSE 0 END +FROM (VALUES (1,1),(2,2),(3,3)) v(time,value) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min | case | case | case +---------------------+-----+------+------+------ + 0 | | -1 | 0 | 1 + 1 | 1 | 1 | 8 | 1 + 2 | 2 | 2 | 9 | 1 + 3 | 3 | 3 | 10 | 1 + 4 | | -1 | 0 | 1 +(5 rows) + +-- test constants +SELECT + time_bucket_gapfill(1,time,0,5), + min(time), min(time), 4 as c +FROM (VALUES (1),(2),(3)) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min | min | c +---------------------+-----+-----+--- + 0 | | | 4 + 1 | 1 | 1 | 4 + 2 | 2 | 2 | 4 + 3 | 3 | 3 | 4 + 4 | | | 4 +(5 rows) + +-- test column reordering +SELECT + 1 as c1, '2' as c2, + time_bucket_gapfill(1,time,0,5), + 3.0 as c3, + min(time), min(time), 4 as c4 +FROM (VALUES (1),(2),(3)) v(time) +GROUP BY 3 ORDER BY 3; + c1 | c2 | time_bucket_gapfill | c3 | min | min | c4 +----+----+---------------------+-----+-----+-----+---- + 1 | 2 | 0 | 3.0 | | | 4 + 1 | 2 | 1 | 3.0 | 1 | 1 | 4 + 1 | 2 | 2 | 3.0 | 2 | 2 | 4 + 1 | 2 | 3 | 3.0 | 3 | 3 | 4 + 1 | 2 | 4 | 3.0 | | | 4 +(5 rows) + +-- test timestamptz +SELECT + time_bucket_gapfill(INTERVAL '6h',time,TIMESTAMPTZ '2000-01-01',TIMESTAMPTZ '2000-01-02'), + min(time) +FROM (VALUES (TIMESTAMPTZ '2000-01-01 9:00:00'),(TIMESTAMPTZ '2000-01-01 18:00:00')) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +------------------------------+------------------------------ + Fri Dec 31 22:00:00 1999 PST | + Sat Jan 01 04:00:00 2000 PST | Sat Jan 01 09:00:00 2000 PST + Sat Jan 01 10:00:00 2000 PST | + Sat Jan 01 16:00:00 2000 PST | Sat Jan 01 18:00:00 2000 PST + Sat Jan 01 22:00:00 2000 PST | +(5 rows) + +-- test timestamp +SELECT + time_bucket_gapfill(INTERVAL '6h',time,TIMESTAMP '2000-01-01',TIMESTAMP '2000-01-02'), + min(time) +FROM (VALUES (TIMESTAMP '2000-01-01 9:00:00'),(TIMESTAMP '2000-01-01 18:00:00')) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +--------------------------+-------------------------- + Sat Jan 01 00:00:00 2000 | + Sat Jan 01 06:00:00 2000 | Sat Jan 01 09:00:00 2000 + Sat Jan 01 12:00:00 2000 | + Sat Jan 01 18:00:00 2000 | Sat Jan 01 18:00:00 2000 +(4 rows) + +-- test date +SELECT + time_bucket_gapfill(INTERVAL '1w',time,DATE '2000-01-01',DATE '2000-02-10'), + min(time) +FROM (VALUES (DATE '2000-01-08'),(DATE '2000-01-22')) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +---------------------+------------ + 12-27-1999 | + 01-03-2000 | 01-08-2000 + 01-10-2000 | + 01-17-2000 | 01-22-2000 + 01-24-2000 | + 01-31-2000 | + 02-07-2000 | +(7 rows) + +-- test grouping by non-time columns +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m +FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +GROUP BY 1,id ORDER BY 2,1; + time | id | m +------+----+--- + 0 | 1 | + 1 | 1 | 1 + 2 | 1 | + 3 | 1 | + 4 | 1 | + 0 | 2 | + 1 | 2 | + 2 | 2 | 2 + 3 | 2 | + 4 | 2 | +(10 rows) + +-- test grouping by non-time columns with no rows in resultset +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m +FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +WHERE false +GROUP BY 1,id ORDER BY 2,1; + time | id | m +------+----+--- +(0 rows) + +-- test duplicate columns in GROUP BY +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + id, + min(value) as m +FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +GROUP BY 1,2,3 ORDER BY 2,1; + time | id | id | m +------+----+----+--- + 0 | 1 | 1 | + 1 | 1 | 1 | 1 + 2 | 1 | 1 | + 3 | 1 | 1 | + 4 | 1 | 1 | + 0 | 2 | 2 | + 1 | 2 | 2 | + 2 | 2 | 2 | 2 + 3 | 2 | 2 | + 4 | 2 | 2 | +(10 
rows) + +-- test grouping by columns not in resultset +SELECT + time_bucket_gapfill(1,time,0,5) as time, + min(value) as m +FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +GROUP BY 1,id ORDER BY id,1; + time | m +------+--- + 0 | + 1 | 1 + 2 | + 3 | + 4 | + 0 | + 1 | + 2 | 2 + 3 | + 4 | +(10 rows) + +-- test grouping by non-time columns with text columns +SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + min(value) as m +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color ORDER BY 2,1; + time | color | m +------+-------+--- + 0 | blue | + 1 | blue | 1 + 2 | blue | + 3 | blue | + 4 | blue | + 0 | red | + 1 | red | + 2 | red | 2 + 3 | red | + 4 | red | +(10 rows) + +-- test grouping by non-time columns with text columns with no rows in resultset +SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + min(value) as m +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +WHERE false +GROUP BY 1,color ORDER BY 2,1; + time | color | m +------+-------+--- +(0 rows) + +--- test insert into SELECT +CREATE TABLE gapfill_insert_test(id INT); +INSERT INTO gapfill_insert_test SELECT time_bucket_gapfill(1,time,1,5) FROM (VALUES (1),(2)) v(time) GROUP BY 1 ORDER BY 1; +SELECT * FROM gapfill_insert_test; + id + 1 + 2 + 3 + 4 +(4 rows) + +-- test join +SELECT t1.*,t2.m FROM +( + SELECT + time_bucket_gapfill(1,time,0,5) as time, color, min(value) as m + FROM + (VALUES (1,'red',1),(2,'blue',2)) v(time,color,value) + GROUP BY 1,color ORDER BY 2,1 +) t1 INNER JOIN +( + SELECT + time_bucket_gapfill(1,time,0,5) as time, color, min(value) as m + FROM + (VALUES (3,'red',1),(4,'blue',2)) v(time,color,value) + GROUP BY 1,color ORDER BY 2,1 +) t2 ON t1.time = t2.time AND t1.color=t2.color; + time | color | m | m +------+-------+---+--- + 0 | blue | | + 1 | blue | | + 2 | blue | 2 | + 3 | blue | | + 4 | blue | | 2 + 0 | red | | + 1 | red | 1 | + 2 | red | | + 3 | red | | 1 + 4 | red | | +(10 rows) + +-- test join with locf +SELECT t1.*,t2.m FROM +( + SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + locf(min(value)) as locf + FROM + (VALUES (0,'red',1),(0,'blue',2)) v(time,color,value) + GROUP BY 1,color ORDER BY 2,1 +) t1 INNER JOIN +( + SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + locf(min(value)) as m + FROM + (VALUES (3,'red',1),(4,'blue',2)) v(time,color,value) + GROUP BY 1,color ORDER BY 2,1 +) t2 ON t1.time = t2.time AND t1.color=t2.color; + time | color | locf | m +------+-------+------+--- + 0 | blue | 2 | + 1 | blue | 2 | + 2 | blue | 2 | + 3 | blue | 2 | + 4 | blue | 2 | 2 + 0 | red | 1 | + 1 | red | 1 | + 2 | red | 1 | + 3 | red | 1 | 1 + 4 | red | 1 | 1 +(10 rows) + +-- test locf +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value)) AS value +FROM (values (10,9),(20,3),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | 3 + 40 | 3 + 50 | 6 +(6 rows) + +-- test locf with NULLs in resultset +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value)) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | + 40 | + 50 | 6 +(6 rows) + +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=false) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | + 40 | + 50 | 6 +(6 rows) + +SELECT + 
time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=NULL) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | + 40 | + 50 | 6 +(6 rows) + +-- test locf with NULLs in resultset and treat_null_as_missing +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=true) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | 3 + 40 | 3 + 50 | 6 +(6 rows) + +-- test locf with NULLs in first row of resultset and treat_null_as_missing with lookup query +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=false, prev := (SELECT 100)) AS v1, + locf(min(value),treat_null_as_missing:=true, prev := (SELECT 100)) AS v2 +FROM (values (0,NULL),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | v1 | v2 +------+----+----- + 0 | | 100 + 10 | | 100 + 20 | | 100 + 30 | | 100 + 40 | | 100 + 50 | 6 | 6 +(6 rows) + +-- test locf with NULLs in resultset and treat_null_as_missing with resort +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=true) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1 DESC; + time | value +------+------- + 50 | 6 + 40 | 3 + 30 | 3 + 20 | 3 + 10 | 9 + 0 | +(6 rows) + +-- test locf with constants +SELECT + time_bucket_gapfill(1,time,0,5), + 2, + locf(min(value)) +FROM (VALUES (0,1,3),(4,2,3)) v(time,value) +GROUP BY 1; + time_bucket_gapfill | ?column? | locf +---------------------+----------+------ + 0 | 2 | 1 + 1 | 2 | 1 + 2 | 2 | 1 + 3 | 2 | 1 + 4 | 2 | 2 +(5 rows) + +-- test expressions inside locf +SELECT + time_bucket_gapfill(1,time,0,5), + locf(min(value)), + locf(4), + locf(4 + min(value)) +FROM (VALUES (0,1,3),(4,2,3)) v(time,value) +GROUP BY 1; + time_bucket_gapfill | locf | locf | locf +---------------------+------+------+------ + 0 | 1 | 4 | 5 + 1 | 1 | 4 | 5 + 2 | 1 | 4 | 5 + 3 | 1 | 4 | 5 + 4 | 2 | 4 | 6 +(5 rows) + +-- test locf with out of boundary lookup +SELECT + time_bucket_gapfill(10,time,0,70) AS time, + locf(min(value),(SELECT 100)) AS value +FROM (values (20,9),(40,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | 100 + 10 | 100 + 20 | 9 + 30 | 9 + 40 | 6 + 50 | 6 + 60 | 6 +(7 rows) + +-- test locf with different datatypes +SELECT + time_bucket_gapfill(1,time,0,5) as time, + locf(min(v1)) AS text, + locf(min(v2)) AS "int[]", + locf(min(v3)) AS "text 4/8k" +FROM (VALUES + (1,'foo',ARRAY[1,2,3],repeat('4k',2048)), + (3,'bar',ARRAY[3,4,5],repeat('8k',4096)) +) v(time,v1,v2,v3) +GROUP BY 1; + time | text | int[] | text 4/8k 
+------+------+---------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 0 | | | + 1 | foo | {1,2,3} | 4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4
k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k + 2 | foo | {1,2,3} | 4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4
k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k + 3 | bar | {3,4,5} | 
8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8
k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k
8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k + 4 | bar | {3,4,5} | 8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k
8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8
k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k +(5 rows) + +-- test locf with different datatypes and treat_null_as_missing +SELECT + time_bucket_gapfill(1,time,0,5) as time, + locf(min(v1),treat_null_as_missing:=true) AS text, + locf(min(v2),treat_null_as_missing:=true) AS "int[]", + locf(min(v3),treat_null_as_missing:=true) AS "text 4/8k" +FROM (VALUES + (1,'foo',ARRAY[1,2,3],repeat('4k',2048)), + (2,NULL,NULL,NULL), + (3,'bar',ARRAY[3,4,5],repeat('8k',4096)) +) v(time,v1,v2,v3) +GROUP BY 1; + time | text | int[] | text 4/8k 
+------+------+---------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 0 | | | + 1 | foo | {1,2,3} | 4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4
k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k + 2 | foo | {1,2,3} | 4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4
k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k + 3 | bar | {3,4,5} | 
8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8
k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k
8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k + 4 | bar | {3,4,5} | 8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k
8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8
k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k +(5 rows) + +-- test interpolate +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + interpolate(min(value)) AS value +FROM (values (0,1),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | 1 + 10 | 2 + 20 | 3 + 30 | 4 + 40 | 5 + 50 | 6 +(6 rows) + +-- test interpolate with NULL values +SELECT + time_bucket_gapfill(1,time,0,5) AS time, + interpolate(avg(temp)) AS temp +FROM (VALUES (0,0),(2,NULL),(5,5)) v(time,temp) +GROUP BY 1; + time | temp +------+------ + 0 | 0 + 1 | + 2 | + 3 | + 4 | + 5 | 5 +(6 rows) + +-- test interpolate datatypes +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + interpolate(min(v1)) AS "smallint", + interpolate(min(v2)) AS "int", + interpolate(min(v3)) AS "bigint", + interpolate(min(v4)) AS "float4", + interpolate(min(v5)) AS "float8" +FROM (values (0,-3::smallint,-3::int,-3::bigint,-3::float4,-3::float8),(50,3::smallint,3::int,3::bigint,3::float4,3::float8)) v(time,v1,v2,v3,v4,v5) +GROUP BY 1 ORDER BY 1; + time | smallint | int | bigint | float4 | float8 +------+----------+-----+--------+--------+-------- + 0 | -3 | -3 | -3 | -3 | -3 + 10 | -2 | -2 | -2 | -1.8 | -1.8 + 20 | -1 | -1 | -1 | -0.6 | -0.6 + 30 | 1 | 1 | 1 | 0.6 | 0.6 + 40 | 2 | 2 | 2 | 1.8 | 1.8 + 50 | 3 | 3 | 3 | 3 | 3 +(6 rows) + +-- test interpolate datatypes with negative time +SELECT + 
time_bucket_gapfill(10,time,-40,30) AS time, + interpolate(min(v1)) AS "smallint", + interpolate(min(v2)) AS "int", + interpolate(min(v3)) AS "bigint", + interpolate(min(v4)) AS "float4", + interpolate(min(v5)) AS "float8" +FROM (values (-40,-3::smallint,-3::int,-3::bigint,-3::float4,-3::float8),(20,3::smallint,3::int,3::bigint,3::float4,3::float8)) v(time,v1,v2,v3,v4,v5) +GROUP BY 1 ORDER BY 1; + time | smallint | int | bigint | float4 | float8 +------+----------+-----+--------+--------+-------- + -40 | -3 | -3 | -3 | -3 | -3 + -30 | -2 | -2 | -2 | -2 | -2 + -20 | -1 | -1 | -1 | -1 | -1 + -10 | 0 | 0 | 0 | 0 | 0 + 0 | 1 | 1 | 1 | 1 | 1 + 10 | 2 | 2 | 2 | 2 | 2 + 20 | 3 | 3 | 3 | 3 | 3 +(7 rows) + +-- test interpolate with multiple groupings +SELECT + time_bucket_gapfill(5,time,0,11), + device, + interpolate(min(v1),(SELECT (-10,-10)),(SELECT (20,10))) +FROM (VALUES (5,1,0),(5,2,0)) as v(time,device,v1) +GROUP BY 1,2 ORDER BY 2,1; + time_bucket_gapfill | device | interpolate +---------------------+--------+------------- + 0 | 1 | -3 + 5 | 1 | 0 + 10 | 1 | 3 + 0 | 2 | -3 + 5 | 2 | 0 + 10 | 2 | 3 +(6 rows) + +-- test cte with gap filling in outer query +WITH data AS ( + SELECT * FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +) +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m +FROM data +GROUP BY 1,id; + time | id | m +------+----+--- + 0 | 1 | + 1 | 1 | 1 + 2 | 1 | + 3 | 1 | + 4 | 1 | + 0 | 2 | + 1 | 2 | + 2 | 2 | 2 + 3 | 2 | + 4 | 2 | +(10 rows) + +-- test cte with gap filling in inner query +WITH gapfill AS ( + SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m + FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) + GROUP BY 1,id +) +SELECT * FROM gapfill; + time | id | m +------+----+--- + 0 | 1 | + 1 | 1 | 1 + 2 | 1 | + 3 | 1 | + 4 | 1 | + 0 | 2 | + 1 | 2 | + 2 | 2 | 2 + 3 | 2 | + 4 | 2 | +(10 rows) + +-- test window functions +SELECT + time_bucket_gapfill(10,time,0,60), + interpolate(min(time)), + lag(min(time)) OVER () +FROM (VALUES (0),(50)) v(time) +GROUP BY 1; + time_bucket_gapfill | interpolate | lag +---------------------+-------------+----- + 0 | 0 | + 10 | 10 | 0 + 20 | 20 | + 30 | 30 | + 40 | 40 | + 50 | 50 | +(6 rows) + +-- test window functions with multiple windows +SELECT + time_bucket_gapfill(1,time,0,10), + interpolate(min(time)), + row_number() OVER (), + locf(min(time)), + sum(interpolate(min(time))) OVER (ROWS 1 PRECEDING), + sum(interpolate(min(time))) OVER (ROWS 2 PRECEDING), + sum(interpolate(min(time))) OVER (ROWS 3 PRECEDING), + sum(interpolate(min(time))) OVER (ROWS 4 PRECEDING) +FROM (VALUES (0),(9)) v(time) +GROUP BY 1; + time_bucket_gapfill | interpolate | row_number | locf | sum | sum | sum | sum +---------------------+-------------+------------+------+-----+-----+-----+----- + 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 + 1 | 1 | 2 | 0 | 1 | 1 | 1 | 1 + 2 | 2 | 3 | 0 | 3 | 3 | 3 | 3 + 3 | 3 | 4 | 0 | 5 | 6 | 6 | 6 + 4 | 4 | 5 | 0 | 7 | 9 | 10 | 10 + 5 | 5 | 6 | 0 | 9 | 12 | 14 | 15 + 6 | 6 | 7 | 0 | 11 | 15 | 18 | 20 + 7 | 7 | 8 | 0 | 13 | 18 | 22 | 25 + 8 | 8 | 9 | 0 | 15 | 21 | 26 | 30 + 9 | 9 | 10 | 9 | 17 | 24 | 30 | 35 +(10 rows) + +-- test window functions with constants +SELECT + time_bucket_gapfill(1,time,0,5), + min(time), + 4 as c, + lag(min(time)) OVER () +FROM (VALUES (1),(2),(3)) v(time) +GROUP BY 1; + time_bucket_gapfill | min | c | lag +---------------------+-----+---+----- + 0 | | 4 | + 1 | 1 | 4 | + 2 | 2 | 4 | 1 + 3 | 3 | 4 | 2 + 4 | | 4 | 3 +(5 rows) + +--test window functions with locf +SELECT + 
time_bucket_gapfill(1,time,0,5), + min(time) AS "min", + lag(min(time)) over () AS lag_min, + lead(min(time)) over () AS lead_min, + locf(min(time)) AS locf, + lag(locf(min(time))) over () AS lag_locf, + lead(locf(min(time))) over () AS lead_locf +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; + time_bucket_gapfill | min | lag_min | lead_min | locf | lag_locf | lead_locf +---------------------+-----+---------+----------+------+----------+----------- + 0 | | | 1 | | | 1 + 1 | 1 | | 2 | 1 | | 2 + 2 | 2 | 1 | | 2 | 1 | 2 + 3 | | 2 | | 2 | 2 | 2 + 4 | | | | 2 | 2 | +(5 rows) + +--test window functions with interpolate +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) AS "min", + lag(min(time)) over () AS lag_min, + lead(min(time)) over () AS lead_min, + interpolate(min(time)) AS interpolate, + lag(interpolate(min(time))) over () AS lag_interpolate, + lead(interpolate(min(time))) over () AS lead_interpolate +FROM (VALUES (1),(3)) v(time) +GROUP BY 1; + time_bucket_gapfill | min | lag_min | lead_min | interpolate | lag_interpolate | lead_interpolate +---------------------+-----+---------+----------+-------------+-----------------+------------------ + 0 | | | 1 | | | 1 + 1 | 1 | | | 1 | | 2 + 2 | | 1 | 3 | 2 | 1 | 3 + 3 | 3 | | | 3 | 2 | + 4 | | 3 | | | 3 | +(5 rows) + +--test window functions with expressions +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) AS "min", + lag(min(time)) over () AS lag_min, + 1 + lag(min(time)) over () AS lag_min, + interpolate(min(time)) AS interpolate, + lag(interpolate(min(time))) over () AS lag_interpolate, + 1 + lag(interpolate(min(time))) over () AS lag_interpolate +FROM (VALUES (1),(3)) v(time) +GROUP BY 1; + time_bucket_gapfill | min | lag_min | lag_min | interpolate | lag_interpolate | lag_interpolate +---------------------+-----+---------+---------+-------------+-----------------+----------------- + 0 | | | | | | + 1 | 1 | | | 1 | | + 2 | | 1 | 2 | 2 | 1 | 2 + 3 | 3 | | | 3 | 2 | 3 + 4 | | 3 | 4 | | 3 | 4 +(5 rows) + +--test row_number/rank/percent_rank/... 
window functions with gapfill reference +SELECT + time_bucket_gapfill(1,time,0,5), + ntile(2) OVER () AS ntile_2, + ntile(3) OVER () AS ntile_3, + ntile(5) OVER () AS ntile_5, + row_number() OVER (), + cume_dist() OVER (ORDER BY time_bucket_gapfill(1,time,0,5)), + rank() OVER (), + rank() OVER (ORDER BY time_bucket_gapfill(1,time,0,5)), + percent_rank() OVER (ORDER BY time_bucket_gapfill(1,time,0,5)) +FROM (VALUES (1),(3)) v(time) +GROUP BY 1; + time_bucket_gapfill | ntile_2 | ntile_3 | ntile_5 | row_number | cume_dist | rank | rank | percent_rank +---------------------+---------+---------+---------+------------+-----------+------+------+-------------- + 0 | 1 | 1 | 1 | 1 | 0.2 | 1 | 1 | 0 + 1 | 1 | 1 | 2 | 2 | 0.4 | 1 | 2 | 0.25 + 2 | 1 | 2 | 3 | 3 | 0.6 | 1 | 3 | 0.5 + 3 | 2 | 2 | 4 | 4 | 0.8 | 1 | 4 | 0.75 + 4 | 2 | 3 | 5 | 5 | 1 | 1 | 5 | 1 +(5 rows) + +-- test first_value/last_value/nth_value +SELECT + time_bucket_gapfill(1,time,0,5), + first_value(min(time)) OVER (), + nth_value(min(time),3) OVER (), + last_value(min(time)) OVER () +FROM (VALUES (0),(2),(5)) v(time) +GROUP BY 1; + time_bucket_gapfill | first_value | nth_value | last_value +---------------------+-------------+-----------+------------ + 0 | 0 | 2 | 5 + 1 | 0 | 2 | 5 + 2 | 0 | 2 | 5 + 3 | 0 | 2 | 5 + 4 | 0 | 2 | 5 + 5 | 0 | 2 | 5 +(6 rows) + +-- test window functions with PARTITION BY +SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + row_number() OVER (), + row_number() OVER (PARTITION BY color) +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color ORDER BY 2,1; + time | color | row_number | row_number +------+-------+------------+------------ + 0 | blue | 1 | 1 + 1 | blue | 2 | 2 + 2 | blue | 3 | 3 + 3 | blue | 4 | 4 + 4 | blue | 5 | 5 + 0 | red | 6 | 1 + 1 | red | 7 | 2 + 2 | red | 8 | 3 + 3 | red | 9 | 4 + 4 | red | 10 | 5 +(10 rows) + +-- test multiple windows +\set ON_ERROR_STOP 0 +SELECT + time_bucket_gapfill(1,time,0,11), + first_value(interpolate(min(time))) OVER (ROWS 1 PRECEDING), + interpolate(min(time)), + last_value(interpolate(min(time))) OVER (ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +FROM (VALUES (0),(10)) v(time) +GROUP BY 1; + time_bucket_gapfill | first_value | interpolate | last_value +---------------------+-------------+-------------+------------ + 0 | 0 | 0 | 1 + 1 | 0 | 1 | 2 + 2 | 1 | 2 | 3 + 3 | 2 | 3 | 4 + 4 | 3 | 4 | 5 + 5 | 4 | 5 | 6 + 6 | 5 | 6 | 7 + 7 | 6 | 7 | 8 + 8 | 7 | 8 | 9 + 9 | 8 | 9 | 10 + 10 | 9 | 10 | 10 +(11 rows) + +-- test reorder +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m +FROM + (VALUES (1,1,1),(2,2,2)) v(time,id,value) +GROUP BY 1,id ORDER BY 1,id; + time | id | m +------+----+--- + 0 | 1 | + 0 | 2 | + 1 | 1 | 1 + 1 | 2 | + 2 | 1 | + 2 | 2 | 2 + 3 | 1 | + 3 | 2 | + 4 | 1 | + 4 | 2 | +(10 rows) + +-- test order by locf +SELECT + time_bucket_gapfill(1,time,1,6), + locf(min(time)) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 1,2; + time_bucket_gapfill | locf +---------------------+------ + 1 | + 2 | 2 + 3 | 3 + 4 | 3 + 5 | 3 +(5 rows) + +SELECT + time_bucket_gapfill(1,time,1,6), + locf(min(time)) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 2 NULLS FIRST,1; + time_bucket_gapfill | locf +---------------------+------ + 1 | + 2 | 2 + 3 | 3 + 4 | 3 + 5 | 3 +(5 rows) + +SELECT + time_bucket_gapfill(1,time,1,6), + locf(min(time)) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 2 NULLS LAST,1; + time_bucket_gapfill | locf +---------------------+------ + 2 | 2 + 3 | 3 + 4 | 3 + 5 | 3 + 1 | +(5 
rows) + +-- test order by interpolate +SELECT + time_bucket_gapfill(1,time,1,6), + interpolate(min(time),prev:=(0,0)::record) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 1,2; + time_bucket_gapfill | interpolate +---------------------+------------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | + 5 | +(5 rows) + +SELECT + time_bucket_gapfill(1,time,1,6), + interpolate(min(time),prev:=(0,0)::record) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 2 NULLS FIRST,1; + time_bucket_gapfill | interpolate +---------------------+------------- + 4 | + 5 | + 1 | 1 + 2 | 2 + 3 | 3 +(5 rows) + +SELECT + time_bucket_gapfill(1,time,1,6), + interpolate(min(time),prev:=(0,0)::record) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 2 NULLS LAST,1; + time_bucket_gapfill | interpolate +---------------------+------------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | + 5 | +(5 rows) + +-- test queries on hypertable +-- test locf and interpolate together +SELECT + time_bucket_gapfill(interval '1h',time,timestamptz '2018-01-01 05:00:00-8', timestamptz '2018-01-01 07:00:00-8'), + device_id, + locf(avg(v1)) AS locf_v1, + locf(min(v2)) AS locf_v2, + interpolate(avg(v1)) AS interpolate_v1, + interpolate(avg(v2)) AS interpolate_v2 +FROM metrics_tstz +GROUP BY 1,2 +ORDER BY 1,2; + time_bucket_gapfill | device_id | locf_v1 | locf_v2 | interpolate_v1 | interpolate_v2 +------------------------------+-----------+---------+---------+----------------+---------------- + Mon Jan 01 05:00:00 2018 PST | 1 | 0.5 | 10 | 0.5 | 10 + Mon Jan 01 05:00:00 2018 PST | 2 | 0.7 | 20 | 0.7 | 20 + Mon Jan 01 05:00:00 2018 PST | 3 | 0.9 | 30 | 0.9 | 30 + Mon Jan 01 06:00:00 2018 PST | 1 | 0.5 | 10 | 0.25 | 5 + Mon Jan 01 06:00:00 2018 PST | 2 | 0.7 | 20 | 1.05 | 30 + Mon Jan 01 06:00:00 2018 PST | 3 | 0.9 | 30 | 0.9 | 30 + Mon Jan 01 07:00:00 2018 PST | 1 | 0 | 0 | 0 | 0 + Mon Jan 01 07:00:00 2018 PST | 2 | 1.4 | 40 | 1.4 | 40 + Mon Jan 01 07:00:00 2018 PST | 3 | 0.9 | 30 | 0.9 | 30 +(9 rows) + +SELECT + time_bucket_gapfill('12h'::interval,time,'2017-01-01'::timestamptz, '2017-01-02'::timestamptz), + interpolate( + avg(v1), + (SELECT ('2017-01-01'::timestamptz,1::float)), + (SELECT ('2017-01-02'::timestamptz,2::float)) + ) +FROM metrics_tstz WHERE time < '2017-01-01' GROUP BY 1; + time_bucket_gapfill | interpolate +------------------------------+------------------- + Sat Dec 31 16:00:00 2016 PST | 0.666666666666667 + Sun Jan 01 04:00:00 2017 PST | 1.16666666666667 + Sun Jan 01 16:00:00 2017 PST | 1.66666666666667 +(3 rows) + +SELECT + time_bucket_gapfill('12h'::interval,time,'2017-01-01'::timestamptz, '2017-01-02'::timestamptz), + interpolate( + avg(v1), + (SELECT ('2017-01-01'::timestamptz,1::float)), + (SELECT ('2017-01-02'::timestamptz,2::float)) + ) +FROM metrics_tstz WHERE time_bucket_gapfill('12h'::interval,time,'2017-01-01'::timestamptz, '2017-01-02'::timestamptz) < '2017-01-01' GROUP BY 1; + time_bucket_gapfill | interpolate +------------------------------+------------------- + Sat Dec 31 16:00:00 2016 PST | 0.666666666666667 + Sun Jan 01 04:00:00 2017 PST | 1.16666666666667 + Sun Jan 01 16:00:00 2017 PST | 1.66666666666667 +(3 rows) + +-- interpolation with correlated subquery lookup before interval +SELECT + time_bucket_gapfill('1h'::interval,time,'2018-01-01 3:00 PST'::timestamptz, '2018-01-01 8:00 PST'::timestamptz), + device_id, + interpolate( + avg(v1), + (SELECT (time,0.5::float) FROM metrics_tstz m2 WHERE m1.device_id=m2.device_id ORDER BY time DESC LIMIT 1) + ), + avg(v1) +FROM metrics_tstz m1 +WHERE device_id=1 GROUP BY 1,2 ORDER BY 
1,2; + time_bucket_gapfill | device_id | interpolate | avg +------------------------------+-----------+-------------+----- + Mon Jan 01 03:00:00 2018 PST | 1 | 0.5 | + Mon Jan 01 04:00:00 2018 PST | 1 | 0.5 | + Mon Jan 01 05:00:00 2018 PST | 1 | 0.5 | 0.5 + Mon Jan 01 06:00:00 2018 PST | 1 | 0.25 | + Mon Jan 01 07:00:00 2018 PST | 1 | 0 | 0 +(5 rows) + +-- interpolation with correlated subquery lookup after interval +SELECT + time_bucket_gapfill('1h'::interval,time,'2018-01-01 5:00 PST'::timestamptz, '2018-01-01 9:00 PST'::timestamptz), + device_id, + interpolate( + avg(v1), + next=>(SELECT (time,v2::float) FROM metrics_tstz m2 WHERE m1.device_id=m2.device_id ORDER BY time LIMIT 1) + ),avg(v1) +FROM metrics_tstz m1 WHERE device_id=1 GROUP BY 1,2 ORDER BY 1,2; + time_bucket_gapfill | device_id | interpolate | avg +------------------------------+-----------+-------------+----- + Mon Jan 01 05:00:00 2018 PST | 1 | 0.5 | 0.5 + Mon Jan 01 06:00:00 2018 PST | 1 | 0.25 | + Mon Jan 01 07:00:00 2018 PST | 1 | 0 | 0 + Mon Jan 01 08:00:00 2018 PST | 1 | -5 | +(4 rows) + +\set ON_ERROR_STOP 0 +-- bucket_width non simple expression +SELECT + time_bucket_gapfill(t,t) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be a simple expression +-- no start/finish and no usable time constraints +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- NULL start/finish and no usable time constraints +SELECT + time_bucket_gapfill(1,t,NULL,NULL) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- no start and no usable time constraints +SELECT + time_bucket_gapfill(1,t,finish:=1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- NULL start expression and no usable time constraints +SELECT + time_bucket_gapfill(1,t,CASE WHEN length(version())>0 THEN NULL::int ELSE NULL::int END,1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. +-- unsupported start expression and no usable time constraints +SELECT + time_bucket_gapfill(1,t,t,1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start must be a simple expression +-- NULL start and no usable time constraints +SELECT + time_bucket_gapfill(1,t,NULL,1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- NULL finish expression and no usable time constraints +SELECT + time_bucket_gapfill(1,t,1,CASE WHEN length(version())>0 THEN NULL::int ELSE NULL::int END) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: finish cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. 
+-- unsupported finish expression and no usable time constraints
+SELECT
+ time_bucket_gapfill(1,t,1,t)
+FROM (VALUES (1),(2)) v(t)
+WHERE true AND true
+GROUP BY 1;
+ERROR: invalid time_bucket_gapfill argument: finish must be a simple expression
+-- no finish and no usable time constraints
+SELECT
+ time_bucket_gapfill(1,t,1)
+FROM (VALUES (1),(2)) v(t)
+WHERE true AND true
+GROUP BY 1;
+ERROR: missing time_bucket_gapfill argument: could not infer finish from WHERE clause
+HINT: Specify start and finish as arguments or in the WHERE clause.
+-- NULL finish and no usable time constraints
+SELECT
+ time_bucket_gapfill(1,t,1,NULL)
+FROM (VALUES (1),(2)) v(t)
+WHERE true AND true
+GROUP BY 1;
+ERROR: missing time_bucket_gapfill argument: could not infer finish from WHERE clause
+HINT: Specify start and finish as arguments or in the WHERE clause.
+-- expression with column reference on right side
+SELECT
+ time_bucket_gapfill(1,t)
+FROM (VALUES (1),(2)) v(t)
+WHERE t > t AND t < 2
+GROUP BY 1;
+ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause
+HINT: Specify start and finish as arguments or in the WHERE clause.
+-- expression with cast
+SELECT
+ time_bucket_gapfill(1,t1::int8)
+FROM (VALUES (1,2),(2,2)) v(t1,t2)
+WHERE t1 >= 1 AND t1 <= 2
+GROUP BY 1;
+ERROR: invalid time_bucket_gapfill argument: ts needs to refer to a single column if no start or finish is supplied
+HINT: Specify start and finish as arguments or in the WHERE clause.
+-- expression with multiple column references
+SELECT
+ time_bucket_gapfill(1,t1+t2)
+FROM (VALUES (1,2),(2,2)) v(t1,t2)
+WHERE t1 > 1 AND t1 < 2
+GROUP BY 1;
+ERROR: invalid time_bucket_gapfill argument: ts needs to refer to a single column if no start or finish is supplied
+HINT: Specify start and finish as arguments or in the WHERE clause.
+-- expression with NULL start in WHERE clause, we use CASE to wrap the NULL so it doesnt get folded
+SELECT
+ time_bucket_gapfill(1,t1)
+FROM (VALUES (1,2),(2,2)) v(t1,t2)
+WHERE t1 > CASE WHEN length(version()) > 0 THEN NULL::int ELSE NULL::int END AND t1 < 4
+GROUP BY 1;
+ERROR: invalid time_bucket_gapfill argument: start cannot be NULL
+HINT: Specify start and finish as arguments or in the WHERE clause.
+-- expression with NULL finish in WHERE clause, we use CASE to wrap the NULL so it doesnt get folded
+SELECT
+ time_bucket_gapfill(1,t1)
+FROM (VALUES (1,2),(2,2)) v(t1,t2)
+WHERE t1 > 0 AND t1 < CASE WHEN length(version()) > 0 THEN NULL::int ELSE NULL::int END
+GROUP BY 1;
+ERROR: invalid time_bucket_gapfill argument: finish cannot be NULL
+HINT: Specify start and finish as arguments or in the WHERE clause.
+-- non-Const NULL as start argument, we use CASE to wrap the NULL so it doesnt get folded
+SELECT
+ time_bucket_gapfill(1,t1,CASE WHEN length(version())>0 THEN NULL::int ELSE NULL::int END)
+FROM (VALUES (1,2),(2,2)) v(t1,t2)
+WHERE t1 > 0 AND t1 < 2
+GROUP BY 1;
+ERROR: invalid time_bucket_gapfill argument: start cannot be NULL
+HINT: Specify start and finish as arguments or in the WHERE clause.
+-- non-Const NULL as finish argument, we use CASE to wrap the NULL so it doesnt get folded
+SELECT
+ time_bucket_gapfill(1,t1,NULL,CASE WHEN length(version())>0 THEN NULL::int ELSE NULL::int END)
+FROM (VALUES (1,2),(2,2)) v(t1,t2)
+WHERE t1 > 0 AND t1 < 2
+GROUP BY 1;
+ERROR: invalid time_bucket_gapfill argument: finish cannot be NULL
+HINT: Specify start and finish as arguments or in the WHERE clause.
+-- time_bucket_gapfill with constraints ORed +SELECT + time_bucket_gapfill(1::int8,t::int8) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= -1 OR t < 3 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts needs to refer to a single column if no start or finish is supplied +HINT: Specify start and finish as arguments or in the WHERE clause. +\set ON_ERROR_STOP 1 +-- int32 time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= -1 AND t < 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 +(4 rows) + +-- same query with less or equal as finish +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= -1 AND t <= 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 + 3 +(5 rows) + +-- int32 time_bucket_gapfill with start column and value switched +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + -1 < t AND t < 3 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 +(3 rows) + +-- int32 time_bucket_gapfill with finish column and value switched +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= 0 AND 3 >= t +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 + 3 +(4 rows) + +-- int16 time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill(1::int2,t) +FROM (VALUES (1::int2),(2::int2)) v(t) +WHERE + t >= -1 AND t < 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 +(4 rows) + +-- int64 time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill(1::int8,t) +FROM (VALUES (1::int8),(2::int8)) v(t) +WHERE + t >= -1 AND t < 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 +(4 rows) + +-- date time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill('1d'::interval,t) +FROM (VALUES ('1999-12-30'::date),('2000-01-01'::date)) v(t) +WHERE + t >= '1999-12-29' AND t < '2000-01-03' +GROUP BY 1; + time_bucket_gapfill + 12-29-1999 + 12-30-1999 + 12-31-1999 + 01-01-2000 + 01-02-2000 +(5 rows) + +-- timestamp time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill('12h'::interval,t) +FROM (VALUES ('1999-12-30'::timestamp),('2000-01-01'::timestamp)) v(t) +WHERE + t >= '1999-12-29' AND t < '2000-01-03' +GROUP BY 1; + time_bucket_gapfill + Wed Dec 29 00:00:00 1999 + Wed Dec 29 12:00:00 1999 + Thu Dec 30 00:00:00 1999 + Thu Dec 30 12:00:00 1999 + Fri Dec 31 00:00:00 1999 + Fri Dec 31 12:00:00 1999 + Sat Jan 01 00:00:00 2000 + Sat Jan 01 12:00:00 2000 + Sun Jan 02 00:00:00 2000 + Sun Jan 02 12:00:00 2000 +(10 rows) + +-- timestamptz time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill('12h'::interval,t) +FROM (VALUES ('1999-12-30'::timestamptz),('2000-01-01'::timestamptz)) v(t) +WHERE + t >= '1999-12-29' AND t < '2000-01-03' +GROUP BY 1; + time_bucket_gapfill + Tue Dec 28 16:00:00 1999 PST + Wed Dec 29 04:00:00 1999 PST + Wed Dec 29 16:00:00 1999 PST + Thu Dec 30 04:00:00 1999 PST + Thu Dec 30 16:00:00 1999 PST + Fri Dec 31 04:00:00 1999 PST + Fri Dec 31 16:00:00 1999 PST + Sat Jan 01 04:00:00 2000 PST + Sat Jan 01 16:00:00 2000 PST + Sun Jan 02 04:00:00 2000 PST + Sun Jan 02 16:00:00 2000 PST +(11 rows) + +-- timestamptz time_bucket_gapfill with more complex expression +SELECT + time_bucket_gapfill('12h'::interval,t) +FROM (VALUES ('1999-12-30'::timestamptz),('2000-01-01'::timestamptz)) v(t) +WHERE + t >= '2000-01-03'::timestamptz - '4d'::interval AND t < '2000-01-03' +GROUP BY 1; + time_bucket_gapfill + Wed Dec 29 16:00:00 1999 PST + Thu Dec 30 04:00:00 1999 PST + Thu Dec 30 16:00:00 1999 PST + Fri Dec 31 
04:00:00 1999 PST + Fri Dec 31 16:00:00 1999 PST + Sat Jan 01 04:00:00 2000 PST + Sat Jan 01 16:00:00 2000 PST + Sun Jan 02 04:00:00 2000 PST + Sun Jan 02 16:00:00 2000 PST +(9 rows) + +-- timestamptz time_bucket_gapfill with different datatype in finish constraint +SELECT + time_bucket_gapfill('12h'::interval,t) +FROM (VALUES ('1999-12-30'::timestamptz),('2000-01-01'::timestamptz)) v(t) +WHERE + t >= '2000-01-03'::timestamptz - '4d'::interval AND t < '2000-01-03'::date +GROUP BY 1; + time_bucket_gapfill + Wed Dec 29 16:00:00 1999 PST + Thu Dec 30 04:00:00 1999 PST + Thu Dec 30 16:00:00 1999 PST + Fri Dec 31 04:00:00 1999 PST + Fri Dec 31 16:00:00 1999 PST + Sat Jan 01 04:00:00 2000 PST + Sat Jan 01 16:00:00 2000 PST + Sun Jan 02 04:00:00 2000 PST + Sun Jan 02 16:00:00 2000 PST +(9 rows) + +-- time_bucket_gapfill with now() as start +SELECT + time_bucket_gapfill('1h'::interval,t) +FROM (VALUES (now()),(now())) v(t) +WHERE + t >= now() AND t < now() - '1h'::interval +GROUP BY 1; + time_bucket_gapfill +(0 rows) + +-- time_bucket_gapfill with multiple constraints +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= -1 AND t < 3 and t>1 AND t <=4 AND length(version()) > 0 +GROUP BY 1; + time_bucket_gapfill + 2 +(1 row) + +-- int32 time_bucket_gapfill with greater for start +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t > -2 AND t < 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 +(4 rows) + +-- test DISTINCT +SELECT DISTINCT ON (color) + time_bucket_gapfill(1,time,0,5) as time, + color, + min(value) as m +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color ORDER BY 2,1; + time | color | m +------+-------+--- + 0 | blue | + 0 | red | +(2 rows) + +-- test DISTINCT with window functions +SELECT DISTINCT ON (row_number() OVER ()) + time_bucket_gapfill(1,time,0,5) as time, + color, + row_number() OVER () +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color; + time | color | row_number +------+-------+------------ + 0 | blue | 1 + 1 | blue | 2 + 2 | blue | 3 + 3 | blue | 4 + 4 | blue | 5 + 0 | red | 6 + 1 | red | 7 + 2 | red | 8 + 3 | red | 9 + 4 | red | 10 +(10 rows) + +-- test DISTINCT with window functions and PARTITION BY +SELECT DISTINCT ON (color,row_number() OVER (PARTITION BY color)) + time_bucket_gapfill(1,time,0,5) as time, + color, + row_number() OVER (PARTITION BY color) +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color; + time | color | row_number +------+-------+------------ + 0 | blue | 1 + 1 | blue | 2 + 2 | blue | 3 + 3 | blue | 4 + 4 | blue | 5 + 0 | red | 1 + 1 | red | 2 + 2 | red | 3 + 3 | red | 4 + 4 | red | 5 +(10 rows) + +-- test DISTINCT with window functions not in targetlist +SELECT DISTINCT ON (row_number() OVER ()) + time_bucket_gapfill(1,time,0,5) as time, + color, + row_number() OVER (PARTITION BY color) +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color; + time | color | row_number +------+-------+------------ + 0 | blue | 1 + 1 | blue | 2 + 2 | blue | 3 + 3 | blue | 4 + 4 | blue | 5 + 0 | red | 1 + 1 | red | 2 + 2 | red | 3 + 3 | red | 4 + 4 | red | 5 +(10 rows) + +-- test column references +SELECT + row_number() OVER (PARTITION BY color), + locf(min(time)), + color, + time_bucket_gapfill(1,time,0,5) as time +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 3,4; + row_number | locf | color | time +------------+------+-------+------ + 1 | | blue | 0 + 2 | 1 | blue | 1 + 3 | 1 | blue | 2 + 4 
| 1 | blue | 3 + 5 | 1 | blue | 4 + 1 | | red | 0 + 2 | | red | 1 + 3 | 2 | red | 2 + 4 | 2 | red | 3 + 5 | 2 | red | 4 +(10 rows) + +-- test with Nested Loop +SELECT l.id, bucket, data_value FROM + (VALUES (1), (2), (3), (4)) a(id) + INNER JOIN LATERAL ( + SELECT b.id id, time_bucket_gapfill('1'::int, time, start=>'1'::int, finish=> '5'::int) bucket, locf(last(data, time)) data_value + FROM (VALUES (1, 1, 1), (1, 4, 4), (2, 1, -1), (2, 4, -4)) b(id, time, data) + WHERE a.id = b.id + GROUP BY b.id, bucket + ) as l on (true); + id | bucket | data_value +----+--------+------------ + 1 | 1 | 1 + 1 | 2 | 1 + 1 | 3 | 1 + 1 | 4 | 4 + 2 | 1 | -1 + 2 | 2 | -1 + 2 | 3 | -1 + 2 | 4 | -4 +(8 rows) + +-- test prepared statement +PREPARE prep_gapfill AS +SELECT + time_bucket_gapfill(1,time,0,5) as time, + locf(min(value)) +FROM (VALUES (1,1),(2,2)) v(time,value) +GROUP BY 1; +-- execute 10 times to make sure turning it into generic plan works +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +DEALLOCATE prep_gapfill; +-- test column references with TIME_COLUMN last +SELECT + row_number() OVER (PARTITION BY color), + locf(min(time)), + color, + time_bucket_gapfill(1,time,0,5) as time +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 3,4; + row_number | locf | color | time +------------+------+-------+------ + 1 | | blue | 0 + 2 | 1 | blue | 1 + 3 | 1 | blue | 2 + 4 | 1 | blue | 3 + 5 | 1 | blue | 4 + 1 | | red | 0 + 2 | | red | 1 + 3 | 2 | red | 2 + 4 | 2 | red | 3 + 5 | 2 | red | 4 +(10 rows) + +-- test expressions on GROUP BY columns +SELECT + row_number() OVER (PARTITION BY color), + locf(min(time)), + color, + length(color), + time_bucket_gapfill(1,time,0,5) as time +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 3,5; + row_number | locf | color | length | time +------------+------+-------+--------+------ + 1 | | blue | 4 | 0 + 2 | 1 | blue | 4 | 1 + 3 | 1 | blue | 4 | 2 + 4 | 1 | blue | 4 | 3 + 5 | 1 | blue | 4 | 4 + 1 | | red | 3 | 0 + 2 | | red | 3 | 1 + 3 | 2 | red | 3 | 2 + 4 | 2 | red | 3 | 3 + 5 | 2 | red | 3 | 4 +(10 rows) + +-- test columns derived from GROUP BY columns with cast +SELECT + time_bucket_gapfill(1,time,0,5) as time, + device_id::text +FROM (VALUES (1,1),(2,2)) v(time,device_id) +GROUP BY 1,device_id; + time | device_id +------+----------- + 0 | 1 + 1 | 1 + 2 | 1 + 3 | 1 + 4 | 1 + 0 | 2 + 1 | 2 + 2 | 2 + 3 | 2 + 4 | 2 +(10 rows) + +-- test columns derived from GROUP BY columns with expression +SELECT + time_bucket_gapfill(1,time,0,5) as time, + 'Device ' || device_id::text +FROM 
(VALUES (1,1),(2,2)) v(time,device_id) +GROUP BY 1,device_id; + time | ?column? +------+---------- + 0 | Device 1 + 1 | Device 1 + 2 | Device 1 + 3 | Device 1 + 4 | Device 1 + 0 | Device 2 + 1 | Device 2 + 2 | Device 2 + 3 | Device 2 + 4 | Device 2 +(10 rows) + +--test interpolation with big differences in values (test overflows in calculations) +--we use the biggest possible difference in time(x) and the value(y). +--For bigints we also test values of smaller than bigintmax/min to avoid +--the symmetry where x=y (which catches more errors) +SELECT 9223372036854775807 as big_int_max \gset +SELECT -9223372036854775808 as big_int_min \gset +SELECT + time_bucket_gapfill(1,time,0,1) AS time, + interpolate(min(s)) AS "smallint", + interpolate(min(i)) AS "int", + interpolate(min(b)) AS "bigint", + interpolate(min(b2)) AS "bigint2", + interpolate(min(d)) AS "double" +FROM (values (:big_int_min,(-32768)::smallint,(-2147483648)::int,:big_int_min,-2147483648::bigint, '-Infinity'::double precision), + (:big_int_max, 32767::smallint, 2147483647::int,:big_int_max, 2147483647::bigint, 'Infinity'::double precision)) v(time,s,i,b,b2,d) +GROUP BY 1 ORDER BY 1; + time | smallint | int | bigint | bigint2 | double +----------------------+----------+-------------+----------------------+-------------+----------- + -9223372036854775808 | -32768 | -2147483648 | -9223372036854775808 | -2147483648 | -Infinity + 0 | 0 | 0 | 0 | 0 | Infinity + 9223372036854775807 | 32767 | 2147483647 | 9223372036854775807 | 2147483647 | Infinity +(3 rows) + +-- issue #2232: This query used to trigger error "could not find +-- pathkey item to sort" due to a corrupt query plan +SELECT time_bucket_gapfill('1 h', time) AS time, + locf(sum(v1)) AS v1_sum, + interpolate(sum(v2)) AS v2_sum +FROM metrics_tstz +WHERE time >= '2018-01-01 04:00' AND time < '2018-01-01 08:00' +GROUP BY 1 +ORDER BY 1 DESC; + time | v1_sum | v2_sum +------------------------------+--------+-------- + Mon Jan 01 07:00:00 2018 PST | 2.3 | 70 + Mon Jan 01 06:00:00 2018 PST | 2.1 | 65 + Mon Jan 01 05:00:00 2018 PST | 2.1 | 60 + Mon Jan 01 04:00:00 2018 PST | | +(4 rows) + +-- query without gapfill: +SELECT time_bucket('1 h', time) AS time, + sum(v1) AS v1_sum, + sum(v2) AS v1_sum +FROM metrics_tstz +WHERE time >= '2018-01-01 04:00' AND time < '2018-01-01 08:00' +GROUP BY 1 +ORDER BY 1 DESC; + time | v1_sum | v1_sum +------------------------------+--------+-------- + Mon Jan 01 07:00:00 2018 PST | 2.3 | 70 + Mon Jan 01 05:00:00 2018 PST | 2.1 | 60 +(2 rows) + +-- query to show original data +SELECT * FROM metrics_tstz +WHERE time >= '2018-01-01 04:00' AND time < '2018-01-01 08:00' +ORDER BY 1 DESC, 2; + time | device_id | v1 | v2 +------------------------------+-----------+-----+---- + Mon Jan 01 07:00:00 2018 PST | 1 | 0 | 0 + Mon Jan 01 07:00:00 2018 PST | 2 | 1.4 | 40 + Mon Jan 01 07:00:00 2018 PST | 3 | 0.9 | 30 + Mon Jan 01 05:00:00 2018 PST | 1 | 0.5 | 10 + Mon Jan 01 05:00:00 2018 PST | 2 | 0.7 | 20 + Mon Jan 01 05:00:00 2018 PST | 3 | 0.9 | 30 +(6 rows) + +-- issue #3048 +-- test gapfill/hashagg planner interaction +-- this used to produce a plan without gapfill node +EXPLAIN (costs off) SELECT time_bucket_gapfill('52w', time, start:='2000-01-01', finish:='2000-01-10') AS time, + sum(v1) AS v1_sum +FROM metrics +GROUP BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Sort + Sort Key: (time_bucket_gapfill('@ 364 days'::interval, _hyper_X_X_chunk."time", 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone, 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time 
zone)) + -> HashAggregate + Group Key: time_bucket_gapfill('@ 364 days'::interval, _hyper_X_X_chunk."time", 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone, 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(10 rows) + +-- issue #3834 +-- test projection handling in gapfill +CREATE TABLE i3834(time timestamptz NOT NULL, ship_id int, value float); +SELECT table_name FROM create_hypertable('i3834','time'); + table_name + i3834 +(1 row) + +INSERT INTO i3834 VALUES ('2020-12-01 14:05:00+01',1,3.123), ('2020-12-01 14:05:00+01',2,4.123), ('2020-12-01 14:05:00+01',3,5.123); +SELECT + time_bucket_gapfill('30000 ms'::interval, time) AS time, + ship_id, + interpolate (avg(value)), + 'speedlog' AS source +FROM + i3834 +WHERE + ship_id IN (1, 2) + AND time >= '2020-12-01 14:05:00+01' + AND time < '2020-12-01 14:10:00+01' +GROUP BY 1,2; + time | ship_id | interpolate | source +------------------------------+---------+-------------+---------- + Tue Dec 01 05:05:00 2020 PST | 1 | 3.123 | speedlog + Tue Dec 01 05:05:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:06:00 2020 PST | 1 | | speedlog + Tue Dec 01 05:06:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:07:00 2020 PST | 1 | | speedlog + Tue Dec 01 05:07:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:08:00 2020 PST | 1 | | speedlog + Tue Dec 01 05:08:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:09:00 2020 PST | 1 | | speedlog + Tue Dec 01 05:09:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:05:00 2020 PST | 2 | 4.123 | speedlog + Tue Dec 01 05:05:30 2020 PST | 2 | | speedlog + Tue Dec 01 05:06:00 2020 PST | 2 | | speedlog + Tue Dec 01 05:06:30 2020 PST | 2 | | speedlog + Tue Dec 01 05:07:00 2020 PST | 2 | | speedlog + Tue Dec 01 05:07:30 2020 PST | 2 | | speedlog + Tue Dec 01 05:08:00 2020 PST | 2 | | speedlog + Tue Dec 01 05:08:30 2020 PST | 2 | | speedlog + Tue Dec 01 05:09:00 2020 PST | 2 | | speedlog + Tue Dec 01 05:09:30 2020 PST | 2 | | speedlog +(20 rows) + +DROP TABLE i3834; +-- issue #1528 +-- test float rounding for certain float values when start and end are identical +SELECT + time_bucket_gapfill('1min'::interval, ts::timestamptz, start:='2019-11-05 2:20', finish:='2019-11-05 2:30'), + interpolate(avg(20266.959547::float4)) AS float4, + interpolate(avg(20266.959547::float8)) AS float8 +FROM (VALUES ('2019-11-05 2:20'), ('2019-11-05 2:30')) v (ts) +GROUP BY 1; + time_bucket_gapfill | float4 | float8 +------------------------------+-----------------+-------------- + Tue Nov 05 02:20:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:21:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:22:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:23:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:24:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:25:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:26:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:27:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:28:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:29:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:30:00 2019 PST | 20266.958984375 | 20266.959547 +(11 rows) + +-- check gapfill group change detection with TOASTed values +CREATE TABLE gapfill_group_toast(time timestamptz NOT NULL, device text, value float); +SELECT table_name FROM create_hypertable('gapfill_group_toast', 'time'); + table_name + 
gapfill_group_toast +(1 row) + +INSERT INTO gapfill_group_toast +SELECT + generate_series('2022-06-01'::timestamptz, '2022-06-03'::timestamptz, '1min'::interval), + '4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43', + random(); +ALTER TABLE gapfill_group_toast SET(timescaledb.compress, timescaledb.compress_segmentby = 'device'); +SELECT count(compress_chunk(c)) FROM show_chunks('gapfill_group_toast') c; + count + 2 +(1 row) + +SELECT + time_bucket_gapfill('1 day', time), device +FROM gapfill_group_toast +WHERE time >= '2022-06-01' AND time <= '2022-06-02' +GROUP BY 1,2; + time_bucket_gapfill | device +------------------------------+------------------------------------------------------------------ + Tue May 31 17:00:00 2022 PDT | 4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43 + Wed Jun 01 17:00:00 2022 PDT | 4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43 +(2 rows) + +DROP TABLE gapfill_group_toast; +-- test bucketing by month +SELECT time_bucket_gapfill('2 month'::interval, ts, '2000-01-01'::timestamptz,'2001-01-01'::timestamptz) FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Fri Dec 31 16:00:00 1999 PST + Tue Feb 29 16:00:00 2000 PST + Sun Apr 30 17:00:00 2000 PDT + Fri Jun 30 17:00:00 2000 PDT + Thu Aug 31 17:00:00 2000 PDT + Tue Oct 31 16:00:00 2000 PST + Sun Dec 31 16:00:00 2000 PST +(7 rows) + +SELECT time_bucket_gapfill('1 year'::interval, ts, '2000-01-01'::timestamptz,'2003-01-01'::timestamptz) FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Fri Dec 31 16:00:00 1999 PST + Sun Dec 31 16:00:00 2000 PST + Mon Dec 31 16:00:00 2001 PST + Tue Dec 31 16:00:00 2002 PST +(4 rows) + +SELECT time_bucket_gapfill('1 century'::interval, ts, '1900-01-01'::timestamptz,'2103-01-01'::timestamptz) FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Sun Dec 31 16:00:00 1899 PST + Fri Dec 31 16:00:00 1999 PST + Thu Dec 31 16:00:00 2099 PST +(3 rows) + +-- test bucketing with timezone +SELECT time_bucket_gapfill('2 month'::interval, ts, 'Europe/Berlin', '2000-01-01','2001-01-01') FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Fri Dec 31 15:00:00 1999 PST + Tue Feb 29 15:00:00 2000 PST + Sat Apr 29 15:00:00 2000 PDT + Thu Jun 29 15:00:00 2000 PDT + Tue Aug 29 15:00:00 2000 PDT + Sun Oct 29 15:00:00 2000 PST + Fri Dec 29 15:00:00 2000 PST +(7 rows) + +SELECT time_bucket_gapfill('2 month'::interval, ts, current_setting('timezone'), '2000-01-01','2001-01-01') FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Sat Jan 01 00:00:00 2000 PST + Wed Mar 01 00:00:00 2000 PST + Mon May 01 00:00:00 2000 PDT + Sat Jul 01 00:00:00 2000 PDT + Fri Sep 01 00:00:00 2000 PDT + Wed Nov 01 00:00:00 2000 PST +(6 rows) + +SELECT time_bucket_gapfill('2 month'::interval, ts, 'UTC', '2000-01-01','2001-01-01') FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Fri Dec 31 16:00:00 1999 PST + Tue Feb 29 16:00:00 2000 PST + Sat Apr 29 16:00:00 2000 PDT + Thu Jun 29 16:00:00 2000 PDT + Tue Aug 29 16:00:00 2000 PDT + Sun Oct 29 16:00:00 2000 PST + Fri Dec 29 16:00:00 2000 PST +(7 rows) + +SET timezone TO 'Europe/Berlin'; +SELECT time_bucket_gapfill('2 month'::interval, ts, 'Europe/Berlin', '2000-01-01','2001-01-01') FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Sat Jan 01 00:00:00 2000 CET + Wed Mar 01 00:00:00 2000 CET + Mon May 01 00:00:00 
2000 CEST + Sat Jul 01 00:00:00 2000 CEST + Fri Sep 01 00:00:00 2000 CEST + Wed Nov 01 00:00:00 2000 CET +(6 rows) + +RESET timezone; +DROP INDEX gapfill_plan_test_indx; +-- Test gapfill with arrays (#5981) +SELECT time_bucket_gapfill(5, ts, 1, 100) as ts, int_arr, locf(last(value, ts)) +FROM ( + SELECT ARRAY[1,2,3,4]::int[] as int_arr, x as ts, x+500000 as value + FROM generate_series(1, 10, 100) as x + ) t +GROUP BY 1, 2 + ts | int_arr | locf +----+-----------+-------- + 0 | {1,2,3,4} | 500001 + 5 | {1,2,3,4} | 500001 + 10 | {1,2,3,4} | 500001 + 15 | {1,2,3,4} | 500001 + 20 | {1,2,3,4} | 500001 + 25 | {1,2,3,4} | 500001 + 30 | {1,2,3,4} | 500001 + 35 | {1,2,3,4} | 500001 + 40 | {1,2,3,4} | 500001 + 45 | {1,2,3,4} | 500001 + 50 | {1,2,3,4} | 500001 + 55 | {1,2,3,4} | 500001 + 60 | {1,2,3,4} | 500001 + 65 | {1,2,3,4} | 500001 + 70 | {1,2,3,4} | 500001 + 75 | {1,2,3,4} | 500001 + 80 | {1,2,3,4} | 500001 + 85 | {1,2,3,4} | 500001 + 90 | {1,2,3,4} | 500001 + 95 | {1,2,3,4} | 500001 +(20 rows) + diff --git a/tsl/test/shared/expected/gapfill-15.out b/tsl/test/shared/expected/gapfill-15.out new file mode 100644 index 00000000000..d2e2e4ec598 --- /dev/null +++ b/tsl/test/shared/expected/gapfill-15.out @@ -0,0 +1,3366 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set EXPLAIN 'EXPLAIN (COSTS OFF)' +-- we want to see error details in the output +\set VERBOSITY default +CREATE TABLE gapfill_plan_test(time timestamptz NOT NULL, value float); +SELECT table_name FROM create_hypertable('gapfill_plan_test','time',chunk_time_interval=>'4 weeks'::interval); + table_name + gapfill_plan_test +(1 row) + +INSERT INTO gapfill_plan_test SELECT generate_series('2018-01-01'::timestamptz,'2018-04-01'::timestamptz,'1m'::interval), 1.0; +-- simple example +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Values Scan on "*VALUES*" +(6 rows) + +-- test sorting +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1 +ORDER BY 2; +QUERY PLAN + Sort + Sort Key: (avg("*VALUES*".column2)) + -> Custom Scan (GapFill) + -> GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Values Scan on "*VALUES*" +(8 rows) + +-- test sort direction +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1 +ORDER BY 1 DESC; +QUERY PLAN + Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) DESC + -> Custom Scan (GapFill) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) NULLS FIRST + -> HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now()) + -> Values Scan on "*VALUES*" +(8 rows) + +-- test order by aggregate function +:EXPLAIN +SELECT + 
time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1 +ORDER BY 2,1; +QUERY PLAN + Sort + Sort Key: (avg("*VALUES*".column2)), (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Custom Scan (GapFill) + -> GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Values Scan on "*VALUES*" +(8 rows) + +-- test query without order by +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Values Scan on "*VALUES*" +(6 rows) + +-- test parallel query +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + avg(value) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Finalize GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Partial HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Parallel Append + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk +(15 rows) + +-- test parallel query with locf +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + locf(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Finalize GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Partial HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Parallel Append + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk +(15 rows) + +-- test parallel query with interpolate +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + interpolate(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Finalize 
GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Partial HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Parallel Append + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk +(15 rows) + +-- make sure we can run gapfill in parallel workers +-- ensure this plan runs in parallel +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + interpolate(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 2 +LIMIT 1; +QUERY PLAN + Limit + -> Sort + Sort Key: (interpolate(avg(gapfill_plan_test.value), NULL::record, NULL::record)) + -> Custom Scan (GapFill) + -> Finalize GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Partial HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Parallel Append + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk +(18 rows) + +-- actually run a parallel gapfill +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + interpolate(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 2 +LIMIT 1; + time_bucket_gapfill | interpolate +------------------------------+------------- + Mon Jan 01 00:00:00 2018 PST | 1 +(1 row) + +-- test sort optimizations +-- test sort optimization with single member order by, +-- should use index scan (no GapFill node for this one since we're not gapfilling) +:EXPLAIN SELECT time_bucket_gapfill('5m',time),value +FROM gapfill_plan_test +ORDER BY 1; +QUERY PLAN + Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(8 rows) + +SET max_parallel_workers_per_gather TO 0; +-- test sort optimizations with locf +:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), locf(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 
16:00:00 1969 PST'::timestamp with time zone)) + -> HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(11 rows) + +-- test sort optimizations with interpolate +:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), interpolate(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(11 rows) + +RESET max_parallel_workers_per_gather; +CREATE INDEX gapfill_plan_test_indx ON gapfill_plan_test(value, time); +-- test sort optimization with ordering by multiple columns and time_bucket_gapfill not last, +-- must not use index scan +:EXPLAIN SELECT time_bucket_gapfill('5m',time),value +FROM gapfill_plan_test +ORDER BY 1,2; +QUERY PLAN + Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)), _hyper_X_X_chunk.value + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(8 rows) + +-- test sort optimization with ordering by multiple columns and time_bucket as last member, +-- should use index scan +:EXPLAIN SELECT time_bucket_gapfill('5m',time),value +FROM gapfill_plan_test +ORDER BY 2,1; +QUERY PLAN + Sort + Sort Key: _hyper_X_X_chunk.value, (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(8 rows) + +\set METRICS metrics_int +-- All test against table :METRICS first +\set ON_ERROR_STOP 0 +-- inverse of previous test query to confirm an error is actually thrown +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3 +FROM :METRICS m1 +WHERE time = 5 +GROUP BY 1,2,3 ORDER BY 2,3,1; +ERROR: division by zero +-- test window functions with multiple column references +SELECT + time_bucket_gapfill(1,time,1,2), + first(min(time),min(time)) OVER () +FROM :METRICS +GROUP BY 1; +ERROR: window functions with multiple column references not supported +-- test with unsupported operator +SELECT + time_bucket_gapfill(1,time) +FROM :METRICS +WHERE time =0 AND time < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. 
+-- test with 2 tables and where clause doesnt match gapfill argument +SELECT + time_bucket_gapfill(1,m2.time) +FROM :METRICS m, :METRICS m2 +WHERE m.time >=0 AND m.time < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- test inner join and where clause doesnt match gapfill argument +SELECT + time_bucket_gapfill(1,m2.time) +FROM :METRICS m1 INNER JOIN :METRICS m2 ON m1.time=m2.time +WHERE m1.time >=0 AND m1.time < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- test outer join with constraints in join condition +-- not usable as start/stop +SELECT + time_bucket_gapfill(1,m1.time) +FROM :METRICS m1 LEFT OUTER JOIN :METRICS m2 ON m1.time=m2.time AND m1.time >=0 AND m1.time < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +\set ON_ERROR_STOP 1 +\ir include/gapfill_metrics_query.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- test locf lookup query does not trigger when not needed +-- 1/(SELECT 0) will throw an error in the lookup query but in order to not +-- always trigger evaluation it needs to be correlated otherwise postgres will +-- always run it once even if the value is never used +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3 +FROM :METRICS m1 +WHERE time >= 0 AND time < 5 +GROUP BY 1,2,3 ORDER BY 2,3,1; + time | device_id | sensor_id | locf3 +------+-----------+-----------+------- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +-- test locf with correlated subquery +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + avg(value), + locf(min(value)) AS locf, + locf(min(value)::int,23) AS locf1, + locf(min(value)::int,(SELECT 42)) AS locf2, + locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3 +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 2,3,1; + time | device_id | sensor_id | avg | locf | locf1 | locf2 | locf3 +------+-----------+-----------+-----+------+-------+-------+------- + 0 | 1 | 1 | 5 | 5 | 5 | 5 | 5 + 5 | 1 | 1 | | 5 | 5 | 5 | 5 + 10 | 1 | 1 | | 5 | 5 | 5 | 5 + 0 | 1 | 2 | | | 23 | 42 | -100 + 5 | 1 | 2 | 10 | 10 | 10 | 10 | 10 + 10 | 1 | 2 | | 10 | 10 | 10 | 10 +(6 rows) + +-- test locf with correlated subquery and "wrong order" +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + avg(value), + locf(min(value)) AS locf, + locf(min(value),23::float) AS locf1, + locf(min(value),(SELECT 42::float)) AS locf2, + locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3 +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 1,2,3; + time | device_id | sensor_id | avg | locf | locf1 | locf2 | locf3 +------+-----------+-----------+-----+------+-------+-------+------- 
+ 0 | 1 | 1 | 5 | 5 | 5 | 5 | 5 + 0 | 1 | 2 | | | 23 | 42 | -100 + 5 | 1 | 1 | | 5 | 5 | 5 | 5 + 5 | 1 | 2 | 10 | 10 | 10 | 10 | 10 + 10 | 1 | 1 | | 5 | 5 | 5 | 5 + 10 | 1 | 2 | | 10 | 10 | 10 | 10 +(6 rows) + +-- test locf with correlated subquery and window functions +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)), + sum(locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1))) OVER (PARTITION BY device_id, sensor_id ROWS 1 PRECEDING) +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3; + time | device_id | sensor_id | locf | sum +------+-----------+-----------+------+------ + 0 | 1 | 1 | 5 | 5 + 5 | 1 | 1 | 5 | 10 + 10 | 1 | 1 | 5 | 10 + 0 | 1 | 2 | -100 | -100 + 5 | 1 | 2 | 10 | -90 + 10 | 1 | 2 | 10 | 20 +(6 rows) + +-- test JOINs +SELECT + time_bucket_gapfill(1,time,0,5) as time, + device_id, + d.name, + sensor_id, + s.name, + avg(m.value) +FROM :METRICS m +INNER JOIN devices d USING(device_id) +INNER JOIN sensors s USING(sensor_id) +WHERE time BETWEEN 0 AND 5 +GROUP BY 1,2,3,4,5; + time | device_id | name | sensor_id | name | avg +------+-----------+----------+-----------+----------+----- + 0 | 1 | Device 1 | 1 | Sensor 1 | 5 + 1 | 1 | Device 1 | 1 | Sensor 1 | + 2 | 1 | Device 1 | 1 | Sensor 1 | + 3 | 1 | Device 1 | 1 | Sensor 1 | + 4 | 1 | Device 1 | 1 | Sensor 1 | + 0 | 1 | Device 1 | 2 | Sensor 2 | + 1 | 1 | Device 1 | 2 | Sensor 2 | + 2 | 1 | Device 1 | 2 | Sensor 2 | + 3 | 1 | Device 1 | 2 | Sensor 2 | + 4 | 1 | Device 1 | 2 | Sensor 2 | + 5 | 1 | Device 1 | 2 | Sensor 2 | 10 +(11 rows) + +-- test interpolate with correlated subquery +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + avg(value), + interpolate(min(value)) AS ip, + interpolate(min(value),(-5,-5.0::float),(15,20.0::float)) AS ip1, + interpolate(min(value),(SELECT (-10,-10.0::float)),(SELECT (15,20.0::float))) AS ip2, + interpolate( + min(value), + (SELECT (time,value) FROM :METRICS m2 + WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time DESC LIMIT 1), + (SELECT (time,value) FROM :METRICS m2 + WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time LIMIT 1) + ) AS ip3 +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 2,3,1; + time | device_id | sensor_id | avg | ip | ip1 | ip2 | ip3 +------+-----------+-----------+-----+----+-----+------------------+------------------ + 0 | 1 | 1 | 5 | 5 | 5 | 5 | 5 + 5 | 1 | 1 | | | 10 | 10 | 4.75 + 10 | 1 | 1 | | | 15 | 15 | 4.5 + 0 | 1 | 2 | | | 2.5 | 3.33333333333333 | 4.76190476190476 + 5 | 1 | 2 | 10 | 10 | 10 | 10 | 10 + 10 | 1 | 2 | | | 15 | 15 | 4.21052631578947 +(6 rows) + +-- test interpolate with correlated subquery and window function +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + interpolate( + min(value), + (SELECT (time,value) FROM :METRICS m2 + WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time DESC LIMIT 1), + (SELECT (time,value) FROM :METRICS m2 + WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time LIMIT 1) + ), + sum(interpolate( + min(value), + (SELECT (time,value) FROM :METRICS m2 + WHERE time<0 AND m2.device_id=m1.device_id AND 
m2.sensor_id=m1.sensor_id + ORDER BY time DESC LIMIT 1), + (SELECT (time,value) FROM :METRICS m2 + WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time LIMIT 1) + )) OVER (PARTITION BY device_id, sensor_id ROWS 1 PRECEDING) +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 2,3,1; + time | device_id | sensor_id | interpolate | sum +------+-----------+-----------+------------------+------------------ + 0 | 1 | 1 | 5 | 5 + 5 | 1 | 1 | 4.75 | 9.75 + 10 | 1 | 1 | 4.5 | 9.25 + 0 | 1 | 2 | 4.76190476190476 | 4.76190476190476 + 5 | 1 | 2 | 10 | 14.7619047619048 + 10 | 1 | 2 | 4.21052631578947 | 14.2105263157895 +(6 rows) + +-- test subqueries +-- subqueries will alter the shape of the plan and top-level constraints +-- might not end up in top-level of jointree +SELECT + time_bucket_gapfill(1,m1.time) +FROM :METRICS m1 +WHERE m1.time >=0 AND m1.time < 2 AND device_id IN (SELECT device_id FROM :METRICS) +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test inner join with constraints in join condition +SELECT + time_bucket_gapfill(1,m2.time) +FROM :METRICS m1 INNER JOIN :METRICS m2 ON m1.time=m2.time AND m2.time >=0 AND m2.time < 2 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test actual table +SELECT + time_bucket_gapfill(1,time) +FROM :METRICS +WHERE time >=0 AND time < 2 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test with table alias +SELECT + time_bucket_gapfill(1,time) +FROM :METRICS m +WHERE m.time >=0 AND m.time < 2 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test with 2 tables +SELECT + time_bucket_gapfill(1,m.time) +FROM :METRICS m, :METRICS m2 +WHERE m.time >=0 AND m.time < 2 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test prepared statement with locf with lookup query +PREPARE prep_gapfill AS +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) +FROM :METRICS m1 +WHERE time >= 0 AND time < 5 +GROUP BY 1,2,3; +-- execute 10 times to make sure turning it into generic plan works +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | 
sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +DEALLOCATE prep_gapfill; +-- test prepared statement with interpolate with lookup query +PREPARE prep_gapfill AS +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + interpolate( + min(value), + (SELECT (time,value) FROM :METRICS m2 + WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time DESC LIMIT 1), + (SELECT (time,value) FROM :METRICS m2 + WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time LIMIT 1) + ) +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 2,3,1; +-- execute 10 times to make sure turning it into generic plan works +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 
| 2 | 4.21052631578947 +(6 rows) + +DEALLOCATE prep_gapfill; +-- test prepared statement with variable gapfill arguments +PREPARE prep_gapfill(int,int,int) AS +SELECT + time_bucket_gapfill($1,time,$2,$3) AS time, + device_id, + sensor_id, + min(value) +FROM :METRICS m1 +WHERE time >= $2 AND time < $3 AND device_id=1 AND sensor_id=1 +GROUP BY 1,2,3 ORDER BY 2,3,1; +-- execute 10 times to make sure turning it into generic plan works +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +DEALLOCATE prep_gapfill; +-- Tests without tables +-- test locf and interpolate call without gapfill +SELECT locf(1); + locf + 1 +(1 row) + +SELECT interpolate(1); + interpolate + 1 +(1 row) + +-- test locf and interpolate call with NULL input +SELECT locf(NULL::int); + locf + +(1 row) + +SELECT interpolate(NULL::bigint); + interpolate + +(1 row) + +\set ON_ERROR_STOP 0 +-- test time_bucket_gapfill not top level function call +SELECT + 1 + time_bucket_gapfill(1,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: no top level time_bucket_gapfill in group by clause +-- test locf with treat_null_as_missing not BOOL +SELECT + time_bucket_gapfill(1,time,1,11), + locf(min(time),treat_null_as_missing:=1) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: function locf(integer, treat_null_as_missing => integer) does not exist +LINE 3: locf(min(time),treat_null_as_missing:=1) + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
+-- test locf with treat_null_as_missing not literal +SELECT + time_bucket_gapfill(1,time,1,11), + locf(min(time),treat_null_as_missing:=random()>0) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid locf argument: treat_null_as_missing must be a BOOL literal +-- test interpolate lookup query with 1 element in record +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),next=>(SELECT ROW(10))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: interpolate RECORD arguments must have 2 elements +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),prev=>(SELECT ROW(10))) +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: interpolate RECORD arguments must have 2 elements +-- test interpolate lookup query with 3 elements in record +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),next=>(SELECT (10,10,10))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: interpolate RECORD arguments must have 2 elements +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),prev=>(SELECT (10,10,10))) +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: interpolate RECORD arguments must have 2 elements +-- test interpolate lookup query with mismatching time datatype +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),next=>(SELECT (10::float,10))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: first argument of interpolate returned record must match used timestamp datatype +DETAIL: Returned type double precision does not match expected type integer. +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),prev=>(SELECT (10::float,10))) +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: first argument of interpolate returned record must match used timestamp datatype +DETAIL: Returned type double precision does not match expected type integer. +-- test interpolate lookup query with mismatching value datatype +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),next=>(SELECT (10,10::float))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: second argument of interpolate returned record must match used interpolate datatype +DETAIL: Returned type double precision does not match expected type integer. +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),prev=>(SELECT (10,10::float))) +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: second argument of interpolate returned record must match used interpolate datatype +DETAIL: Returned type double precision does not match expected type integer. +-- test interpolate with unsupported datatype +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(text 'text') +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: function interpolate(text) does not exist +LINE 3: interpolate(text 'text') + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(interval '1d') +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: function interpolate(interval) does not exist +LINE 3: interpolate(interval '1d') + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
+-- test multiple time_bucket_gapfill calls +SELECT + time_bucket_gapfill(1,time,1,11),time_bucket_gapfill(1,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple time_bucket_gapfill calls not allowed +-- test nested time_bucket_gapfill calls +SELECT + time_bucket_gapfill(1,time_bucket_gapfill(1,time,1,11),1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple time_bucket_gapfill calls not allowed +-- test nested locf calls +SELECT + time_bucket_gapfill(1,time,1,11), + locf(locf(min(time))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple interpolate/locf function calls per resultset column not supported +-- test nested interpolate calls +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(interpolate(min(time))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple interpolate/locf function calls per resultset column not supported +-- test mixed locf/interpolate calls +SELECT + time_bucket_gapfill(1,time,1,11), + locf(interpolate(min(time))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple interpolate/locf function calls per resultset column not supported +-- test window function inside locf +SELECT + time_bucket_gapfill(1,time,1,11), + locf(avg(min(time)) OVER ()) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: window functions must not be below locf +-- test nested window functions +-- prevented by postgres +SELECT + time_bucket_gapfill(1,time,1,11), + avg(avg(min(time)) OVER ()) OVER () +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: window function calls cannot be nested +LINE 3: avg(avg(min(time)) OVER ()) OVER () + ^ +-- test multiple window functions in single column +SELECT + time_bucket_gapfill(1,time,1,11), + avg(min(time)) OVER () + avg(min(time)) OVER () +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple window function calls per column not supported +-- test locf not toplevel +SELECT + time_bucket_gapfill(1,time,1,11), + 1 + locf(min(time)) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: locf must be toplevel function call +-- test locf inside aggregate +SELECT + time_bucket_gapfill(1,time,1,11), + min(min(locf(time))) OVER () +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: aggregate functions must be below locf +-- test NULL args +SELECT + time_bucket_gapfill(NULL,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width cannot be NULL +SELECT + time_bucket_gapfill(1,NULL,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts cannot be NULL +SELECT + time_bucket_gapfill(1,time,NULL,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +SELECT + time_bucket_gapfill(1,time,1,NULL) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer finish from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. 
+SELECT + time_bucket_gapfill(NULL,time,'Europe/Berlin','2000-06-01','2001-06-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2001-01-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width cannot be NULL +SELECT + time_bucket_gapfill('1day',NULL,'Europe/Berlin','2000-06-01','2001-06-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2001-01-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts cannot be NULL +SELECT + time_bucket_gapfill('1day',time,NULL,'2000-06-01','2001-06-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2001-01-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: timezone cannot be NULL +-- test 0 bucket_width +SELECT + time_bucket_gapfill(0,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +SELECT + time_bucket_gapfill('0d',time,'2000-01-01','2000-02-01') +FROM (VALUES ('2000-01-01'::date),('2000-02-01'::date)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +SELECT + time_bucket_gapfill('0d',time,'2000-01-01','2000-02-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2000-02-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +-- test negative bucket_width +SELECT + time_bucket_gapfill(-1,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +SELECT + time_bucket_gapfill('-1d',time,'2000-01-01','2000-02-01') +FROM (VALUES ('2000-01-01'::date),('2000-02-01'::date)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +SELECT + time_bucket_gapfill('-1d',time,'2000-01-01','2000-02-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2000-02-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +-- test subqueries as interval, start and stop (not supported atm) +SELECT + time_bucket_gapfill((SELECT 1),time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be a simple expression +SELECT + time_bucket_gapfill(1,time,(SELECT 1),11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start must be a simple expression +SELECT + time_bucket_gapfill(1,time,1,(SELECT 11)) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: finish must be a simple expression +\set ON_ERROR_STOP 1 +-- test time_bucket_gapfill without aggregation +-- this will not trigger gapfilling +SELECT + time_bucket_gapfill(1,time,1,11) +FROM (VALUES (1),(2)) v(time); + time_bucket_gapfill + 1 + 2 +(2 rows) + +SELECT + time_bucket_gapfill(1,time,1,11), + avg(time) OVER () +FROM (VALUES (1),(2)) v(time); + time_bucket_gapfill | avg +---------------------+-------------------- + 1 | 1.5000000000000000 + 2 | 1.5000000000000000 +(2 rows) + +-- test int int2/4/8 +SELECT + time_bucket_gapfill(1::int2,time::int2,0::int2,6::int2) +FROM (VALUES (1),(4)) v(time) +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 + 3 + 4 + 5 +(6 rows) + +SELECT + time_bucket_gapfill(1::int4,time::int4,0::int4,6::int4) +FROM (VALUES (1),(4)) v(time) +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 + 3 + 4 + 5 +(6 rows) + +SELECT + time_bucket_gapfill(1::int8,time::int8,0::int8,6::int8) +FROM (VALUES 
(1),(4)) v(time) +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 + 3 + 4 + 5 +(6 rows) + +-- test non-aligned bucket start +SELECT + time_bucket_gapfill(10,time,5,40) +FROM (VALUES (11),(22)) v(time) +GROUP BY 1; + time_bucket_gapfill + 0 + 10 + 20 + 30 +(4 rows) + +-- simple gapfill query +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + min(value) AS value +FROM (values (-10,1),(10,2),(11,3),(12,4),(22,5),(30,6),(66,7)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + -10 | 1 + 0 | + 10 | 2 + 20 | 5 + 30 | 6 + 40 | + 60 | 7 +(7 rows) + +-- test references to different columns +SELECT + time_bucket_gapfill(1,t,0,5) as t, + min(t),max(t),min(v),max(v) +FROM(VALUES (1,3),(2,5)) tb(t,v) +GROUP BY 1 ORDER BY 1; + t | min | max | min | max +---+-----+-----+-----+----- + 0 | | | | + 1 | 1 | 1 | 3 | 3 + 2 | 2 | 2 | 5 | 5 + 3 | | | | + 4 | | | | +(5 rows) + +-- test passing of values outside boundaries +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) +FROM (VALUES (-1),(1),(3),(6)) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +---------------------+----- + -1 | -1 + 0 | + 1 | 1 + 2 | + 3 | 3 + 4 | + 6 | 6 +(7 rows) + +-- test gap fill before first row and after last row +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) +FROM (VALUES (1),(2),(3)) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +---------------------+----- + 0 | + 1 | 1 + 2 | 2 + 3 | 3 + 4 | +(5 rows) + +-- test gap fill without rows in resultset +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) +FROM (VALUES (1),(2),(3)) v(time) +WHERE false +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +---------------------+----- + 0 | + 1 | + 2 | + 3 | + 4 | +(5 rows) + +-- test coalesce +SELECT + time_bucket_gapfill(1,time,0,5), + coalesce(min(time),0), + coalesce(min(value),0), + coalesce(min(value),7) +FROM (VALUES (1,1),(2,2),(3,3)) v(time,value) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | coalesce | coalesce | coalesce +---------------------+----------+----------+---------- + 0 | 0 | 0 | 7 + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 + 4 | 0 | 0 | 7 +(5 rows) + +-- test case +SELECT + time_bucket_gapfill(1,time,0,5), + min(time), + CASE WHEN min(time) IS NOT NULL THEN min(time) ELSE -1 END, + CASE WHEN min(time) IS NOT NULL THEN min(time) + 7 ELSE 0 END, + CASE WHEN 1 = 1 THEN 1 ELSE 0 END +FROM (VALUES (1,1),(2,2),(3,3)) v(time,value) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min | case | case | case +---------------------+-----+------+------+------ + 0 | | -1 | 0 | 1 + 1 | 1 | 1 | 8 | 1 + 2 | 2 | 2 | 9 | 1 + 3 | 3 | 3 | 10 | 1 + 4 | | -1 | 0 | 1 +(5 rows) + +-- test constants +SELECT + time_bucket_gapfill(1,time,0,5), + min(time), min(time), 4 as c +FROM (VALUES (1),(2),(3)) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min | min | c +---------------------+-----+-----+--- + 0 | | | 4 + 1 | 1 | 1 | 4 + 2 | 2 | 2 | 4 + 3 | 3 | 3 | 4 + 4 | | | 4 +(5 rows) + +-- test column reordering +SELECT + 1 as c1, '2' as c2, + time_bucket_gapfill(1,time,0,5), + 3.0 as c3, + min(time), min(time), 4 as c4 +FROM (VALUES (1),(2),(3)) v(time) +GROUP BY 3 ORDER BY 3; + c1 | c2 | time_bucket_gapfill | c3 | min | min | c4 +----+----+---------------------+-----+-----+-----+---- + 1 | 2 | 0 | 3.0 | | | 4 + 1 | 2 | 1 | 3.0 | 1 | 1 | 4 + 1 | 2 | 2 | 3.0 | 2 | 2 | 4 + 1 | 2 | 3 | 3.0 | 3 | 3 | 4 + 1 | 2 | 4 | 3.0 | | | 4 +(5 rows) + +-- test timestamptz +SELECT + time_bucket_gapfill(INTERVAL '6h',time,TIMESTAMPTZ '2000-01-01',TIMESTAMPTZ '2000-01-02'), + min(time) 
+FROM (VALUES (TIMESTAMPTZ '2000-01-01 9:00:00'),(TIMESTAMPTZ '2000-01-01 18:00:00')) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +------------------------------+------------------------------ + Fri Dec 31 22:00:00 1999 PST | + Sat Jan 01 04:00:00 2000 PST | Sat Jan 01 09:00:00 2000 PST + Sat Jan 01 10:00:00 2000 PST | + Sat Jan 01 16:00:00 2000 PST | Sat Jan 01 18:00:00 2000 PST + Sat Jan 01 22:00:00 2000 PST | +(5 rows) + +-- test timestamp +SELECT + time_bucket_gapfill(INTERVAL '6h',time,TIMESTAMP '2000-01-01',TIMESTAMP '2000-01-02'), + min(time) +FROM (VALUES (TIMESTAMP '2000-01-01 9:00:00'),(TIMESTAMP '2000-01-01 18:00:00')) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +--------------------------+-------------------------- + Sat Jan 01 00:00:00 2000 | + Sat Jan 01 06:00:00 2000 | Sat Jan 01 09:00:00 2000 + Sat Jan 01 12:00:00 2000 | + Sat Jan 01 18:00:00 2000 | Sat Jan 01 18:00:00 2000 +(4 rows) + +-- test date +SELECT + time_bucket_gapfill(INTERVAL '1w',time,DATE '2000-01-01',DATE '2000-02-10'), + min(time) +FROM (VALUES (DATE '2000-01-08'),(DATE '2000-01-22')) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +---------------------+------------ + 12-27-1999 | + 01-03-2000 | 01-08-2000 + 01-10-2000 | + 01-17-2000 | 01-22-2000 + 01-24-2000 | + 01-31-2000 | + 02-07-2000 | +(7 rows) + +-- test grouping by non-time columns +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m +FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +GROUP BY 1,id ORDER BY 2,1; + time | id | m +------+----+--- + 0 | 1 | + 1 | 1 | 1 + 2 | 1 | + 3 | 1 | + 4 | 1 | + 0 | 2 | + 1 | 2 | + 2 | 2 | 2 + 3 | 2 | + 4 | 2 | +(10 rows) + +-- test grouping by non-time columns with no rows in resultset +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m +FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +WHERE false +GROUP BY 1,id ORDER BY 2,1; + time | id | m +------+----+--- +(0 rows) + +-- test duplicate columns in GROUP BY +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + id, + min(value) as m +FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +GROUP BY 1,2,3 ORDER BY 2,1; + time | id | id | m +------+----+----+--- + 0 | 1 | 1 | + 1 | 1 | 1 | 1 + 2 | 1 | 1 | + 3 | 1 | 1 | + 4 | 1 | 1 | + 0 | 2 | 2 | + 1 | 2 | 2 | + 2 | 2 | 2 | 2 + 3 | 2 | 2 | + 4 | 2 | 2 | +(10 rows) + +-- test grouping by columns not in resultset +SELECT + time_bucket_gapfill(1,time,0,5) as time, + min(value) as m +FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +GROUP BY 1,id ORDER BY id,1; + time | m +------+--- + 0 | + 1 | 1 + 2 | + 3 | + 4 | + 0 | + 1 | + 2 | 2 + 3 | + 4 | +(10 rows) + +-- test grouping by non-time columns with text columns +SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + min(value) as m +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color ORDER BY 2,1; + time | color | m +------+-------+--- + 0 | blue | + 1 | blue | 1 + 2 | blue | + 3 | blue | + 4 | blue | + 0 | red | + 1 | red | + 2 | red | 2 + 3 | red | + 4 | red | +(10 rows) + +-- test grouping by non-time columns with text columns with no rows in resultset +SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + min(value) as m +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +WHERE false +GROUP BY 1,color ORDER BY 2,1; + time | color | m +------+-------+--- +(0 rows) + +--- test insert into SELECT +CREATE TABLE gapfill_insert_test(id INT); +INSERT INTO gapfill_insert_test SELECT time_bucket_gapfill(1,time,1,5) FROM (VALUES (1),(2)) v(time) 
GROUP BY 1 ORDER BY 1; +SELECT * FROM gapfill_insert_test; + id + 1 + 2 + 3 + 4 +(4 rows) + +-- test join +SELECT t1.*,t2.m FROM +( + SELECT + time_bucket_gapfill(1,time,0,5) as time, color, min(value) as m + FROM + (VALUES (1,'red',1),(2,'blue',2)) v(time,color,value) + GROUP BY 1,color ORDER BY 2,1 +) t1 INNER JOIN +( + SELECT + time_bucket_gapfill(1,time,0,5) as time, color, min(value) as m + FROM + (VALUES (3,'red',1),(4,'blue',2)) v(time,color,value) + GROUP BY 1,color ORDER BY 2,1 +) t2 ON t1.time = t2.time AND t1.color=t2.color; + time | color | m | m +------+-------+---+--- + 0 | blue | | + 1 | blue | | + 2 | blue | 2 | + 3 | blue | | + 4 | blue | | 2 + 0 | red | | + 1 | red | 1 | + 2 | red | | + 3 | red | | 1 + 4 | red | | +(10 rows) + +-- test join with locf +SELECT t1.*,t2.m FROM +( + SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + locf(min(value)) as locf + FROM + (VALUES (0,'red',1),(0,'blue',2)) v(time,color,value) + GROUP BY 1,color ORDER BY 2,1 +) t1 INNER JOIN +( + SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + locf(min(value)) as m + FROM + (VALUES (3,'red',1),(4,'blue',2)) v(time,color,value) + GROUP BY 1,color ORDER BY 2,1 +) t2 ON t1.time = t2.time AND t1.color=t2.color; + time | color | locf | m +------+-------+------+--- + 0 | blue | 2 | + 1 | blue | 2 | + 2 | blue | 2 | + 3 | blue | 2 | + 4 | blue | 2 | 2 + 0 | red | 1 | + 1 | red | 1 | + 2 | red | 1 | + 3 | red | 1 | 1 + 4 | red | 1 | 1 +(10 rows) + +-- test locf +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value)) AS value +FROM (values (10,9),(20,3),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | 3 + 40 | 3 + 50 | 6 +(6 rows) + +-- test locf with NULLs in resultset +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value)) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | + 40 | + 50 | 6 +(6 rows) + +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=false) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | + 40 | + 50 | 6 +(6 rows) + +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=NULL) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | + 40 | + 50 | 6 +(6 rows) + +-- test locf with NULLs in resultset and treat_null_as_missing +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=true) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | 3 + 40 | 3 + 50 | 6 +(6 rows) + +-- test locf with NULLs in first row of resultset and treat_null_as_missing with lookup query +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=false, prev := (SELECT 100)) AS v1, + locf(min(value),treat_null_as_missing:=true, prev := (SELECT 100)) AS v2 +FROM (values (0,NULL),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | v1 | v2 +------+----+----- + 0 | | 100 + 10 | | 100 + 20 | | 100 + 30 | | 100 + 40 | | 100 + 50 | 6 | 6 +(6 rows) + +-- test locf with NULLs in resultset and treat_null_as_missing with resort +SELECT + 
time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=true) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1 DESC; + time | value +------+------- + 50 | 6 + 40 | 3 + 30 | 3 + 20 | 3 + 10 | 9 + 0 | +(6 rows) + +-- test locf with constants +SELECT + time_bucket_gapfill(1,time,0,5), + 2, + locf(min(value)) +FROM (VALUES (0,1,3),(4,2,3)) v(time,value) +GROUP BY 1; + time_bucket_gapfill | ?column? | locf +---------------------+----------+------ + 0 | 2 | 1 + 1 | 2 | 1 + 2 | 2 | 1 + 3 | 2 | 1 + 4 | 2 | 2 +(5 rows) + +-- test expressions inside locf +SELECT + time_bucket_gapfill(1,time,0,5), + locf(min(value)), + locf(4), + locf(4 + min(value)) +FROM (VALUES (0,1,3),(4,2,3)) v(time,value) +GROUP BY 1; + time_bucket_gapfill | locf | locf | locf +---------------------+------+------+------ + 0 | 1 | 4 | 5 + 1 | 1 | 4 | 5 + 2 | 1 | 4 | 5 + 3 | 1 | 4 | 5 + 4 | 2 | 4 | 6 +(5 rows) + +-- test locf with out of boundary lookup +SELECT + time_bucket_gapfill(10,time,0,70) AS time, + locf(min(value),(SELECT 100)) AS value +FROM (values (20,9),(40,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | 100 + 10 | 100 + 20 | 9 + 30 | 9 + 40 | 6 + 50 | 6 + 60 | 6 +(7 rows) + +-- test locf with different datatypes +SELECT + time_bucket_gapfill(1,time,0,5) as time, + locf(min(v1)) AS text, + locf(min(v2)) AS "int[]", + locf(min(v3)) AS "text 4/8k" +FROM (VALUES + (1,'foo',ARRAY[1,2,3],repeat('4k',2048)), + (3,'bar',ARRAY[3,4,5],repeat('8k',4096)) +) v(time,v1,v2,v3) +GROUP BY 1; + time | text | int[] | text 4/8k +------+------+---------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 0 | | | + 1 | foo | {1,2,3} | 
4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4
k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k + 2 | foo | {1,2,3} | 4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4
k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k + 3 | bar | {3,4,5} | 8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8
k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k
8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k + 4 | bar | {3,4,5} | 
8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8
k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k
8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k +(5 rows) + +-- test locf with different datatypes and treat_null_as_missing +SELECT + time_bucket_gapfill(1,time,0,5) as time, + locf(min(v1),treat_null_as_missing:=true) AS text, + locf(min(v2),treat_null_as_missing:=true) AS "int[]", + locf(min(v3),treat_null_as_missing:=true) AS "text 4/8k" +FROM (VALUES + (1,'foo',ARRAY[1,2,3],repeat('4k',2048)), + (2,NULL,NULL,NULL), + (3,'bar',ARRAY[3,4,5],repeat('8k',4096)) +) v(time,v1,v2,v3) +GROUP BY 1; + time | text | int[] | text 4/8k +------+------+---------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 0 | | | + 1 | foo | {1,2,3} | 
4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4
k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k + 2 | foo | {1,2,3} | 4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4
k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k + 3 | bar | {3,4,5} | 8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8
k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k
8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k + 4 | bar | {3,4,5} | 
8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8
k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k
8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k +(5 rows) + +-- test interpolate +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + interpolate(min(value)) AS value +FROM (values (0,1),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | 1 + 10 | 2 + 20 | 3 + 30 | 4 + 40 | 5 + 50 | 6 +(6 rows) + +-- test interpolate with NULL values +SELECT + time_bucket_gapfill(1,time,0,5) AS time, + interpolate(avg(temp)) AS temp +FROM (VALUES (0,0),(2,NULL),(5,5)) v(time,temp) +GROUP BY 1; + time | temp +------+------ + 0 | 0 + 1 | + 2 | + 3 | + 4 | + 5 | 5 +(6 rows) + +-- test interpolate datatypes +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + interpolate(min(v1)) AS "smallint", + interpolate(min(v2)) AS "int", + interpolate(min(v3)) AS "bigint", + interpolate(min(v4)) AS "float4", + interpolate(min(v5)) AS "float8" +FROM (values (0,-3::smallint,-3::int,-3::bigint,-3::float4,-3::float8),(50,3::smallint,3::int,3::bigint,3::float4,3::float8)) v(time,v1,v2,v3,v4,v5) +GROUP BY 1 ORDER BY 1; + time | smallint | int | bigint | float4 | float8 +------+----------+-----+--------+--------+-------- + 0 | -3 | -3 | -3 | -3 | -3 + 10 | -2 | -2 | -2 | -1.8 | -1.8 + 20 | -1 | -1 | -1 | -0.6 | -0.6 + 30 | 1 | 1 | 1 | 0.6 | 0.6 + 40 | 2 | 2 | 2 | 1.8 | 1.8 + 50 | 3 | 3 | 3 | 3 | 3 +(6 rows) + +-- test interpolate datatypes with negative time +SELECT + time_bucket_gapfill(10,time,-40,30) AS time, + interpolate(min(v1)) AS "smallint", + interpolate(min(v2)) AS "int", + interpolate(min(v3)) AS "bigint", + interpolate(min(v4)) AS "float4", + interpolate(min(v5)) AS "float8" +FROM (values (-40,-3::smallint,-3::int,-3::bigint,-3::float4,-3::float8),(20,3::smallint,3::int,3::bigint,3::float4,3::float8)) v(time,v1,v2,v3,v4,v5) +GROUP BY 1 ORDER BY 1; + time | smallint | int | bigint | float4 | float8 +------+----------+-----+--------+--------+-------- + -40 | -3 | -3 | -3 | -3 | -3 + -30 | -2 | -2 | -2 | -2 | -2 + -20 | -1 | -1 | -1 | -1 | -1 + -10 | 0 | 0 | 0 | 0 | 0 + 0 | 1 | 1 | 1 | 1 | 1 + 10 | 2 | 2 | 2 | 2 | 2 + 20 | 3 | 3 | 3 | 3 | 3 +(7 rows) + +-- test interpolate with multiple groupings +SELECT + time_bucket_gapfill(5,time,0,11), + device, + interpolate(min(v1),(SELECT (-10,-10)),(SELECT (20,10))) +FROM (VALUES (5,1,0),(5,2,0)) as v(time,device,v1) +GROUP BY 1,2 ORDER BY 2,1; + time_bucket_gapfill | device | interpolate +---------------------+--------+------------- + 0 | 1 | -3 + 5 | 1 | 0 + 10 | 1 | 3 + 0 | 2 | -3 + 5 | 2 | 0 + 10 | 2 | 3 +(6 rows) + +-- test 
cte with gap filling in outer query +WITH data AS ( + SELECT * FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +) +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m +FROM data +GROUP BY 1,id; + time | id | m +------+----+--- + 0 | 1 | + 1 | 1 | 1 + 2 | 1 | + 3 | 1 | + 4 | 1 | + 0 | 2 | + 1 | 2 | + 2 | 2 | 2 + 3 | 2 | + 4 | 2 | +(10 rows) + +-- test cte with gap filling in inner query +WITH gapfill AS ( + SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m + FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) + GROUP BY 1,id +) +SELECT * FROM gapfill; + time | id | m +------+----+--- + 0 | 1 | + 1 | 1 | 1 + 2 | 1 | + 3 | 1 | + 4 | 1 | + 0 | 2 | + 1 | 2 | + 2 | 2 | 2 + 3 | 2 | + 4 | 2 | +(10 rows) + +-- test window functions +SELECT + time_bucket_gapfill(10,time,0,60), + interpolate(min(time)), + lag(min(time)) OVER () +FROM (VALUES (0),(50)) v(time) +GROUP BY 1; + time_bucket_gapfill | interpolate | lag +---------------------+-------------+----- + 0 | 0 | + 10 | 10 | 0 + 20 | 20 | + 30 | 30 | + 40 | 40 | + 50 | 50 | +(6 rows) + +-- test window functions with multiple windows +SELECT + time_bucket_gapfill(1,time,0,10), + interpolate(min(time)), + row_number() OVER (), + locf(min(time)), + sum(interpolate(min(time))) OVER (ROWS 1 PRECEDING), + sum(interpolate(min(time))) OVER (ROWS 2 PRECEDING), + sum(interpolate(min(time))) OVER (ROWS 3 PRECEDING), + sum(interpolate(min(time))) OVER (ROWS 4 PRECEDING) +FROM (VALUES (0),(9)) v(time) +GROUP BY 1; + time_bucket_gapfill | interpolate | row_number | locf | sum | sum | sum | sum +---------------------+-------------+------------+------+-----+-----+-----+----- + 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 + 1 | 1 | 2 | 0 | 1 | 1 | 1 | 1 + 2 | 2 | 3 | 0 | 3 | 3 | 3 | 3 + 3 | 3 | 4 | 0 | 5 | 6 | 6 | 6 + 4 | 4 | 5 | 0 | 7 | 9 | 10 | 10 + 5 | 5 | 6 | 0 | 9 | 12 | 14 | 15 + 6 | 6 | 7 | 0 | 11 | 15 | 18 | 20 + 7 | 7 | 8 | 0 | 13 | 18 | 22 | 25 + 8 | 8 | 9 | 0 | 15 | 21 | 26 | 30 + 9 | 9 | 10 | 9 | 17 | 24 | 30 | 35 +(10 rows) + +-- test window functions with constants +SELECT + time_bucket_gapfill(1,time,0,5), + min(time), + 4 as c, + lag(min(time)) OVER () +FROM (VALUES (1),(2),(3)) v(time) +GROUP BY 1; + time_bucket_gapfill | min | c | lag +---------------------+-----+---+----- + 0 | | 4 | + 1 | 1 | 4 | + 2 | 2 | 4 | 1 + 3 | 3 | 4 | 2 + 4 | | 4 | 3 +(5 rows) + +--test window functions with locf +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) AS "min", + lag(min(time)) over () AS lag_min, + lead(min(time)) over () AS lead_min, + locf(min(time)) AS locf, + lag(locf(min(time))) over () AS lag_locf, + lead(locf(min(time))) over () AS lead_locf +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; + time_bucket_gapfill | min | lag_min | lead_min | locf | lag_locf | lead_locf +---------------------+-----+---------+----------+------+----------+----------- + 0 | | | 1 | | | 1 + 1 | 1 | | 2 | 1 | | 2 + 2 | 2 | 1 | | 2 | 1 | 2 + 3 | | 2 | | 2 | 2 | 2 + 4 | | | | 2 | 2 | +(5 rows) + +--test window functions with interpolate +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) AS "min", + lag(min(time)) over () AS lag_min, + lead(min(time)) over () AS lead_min, + interpolate(min(time)) AS interpolate, + lag(interpolate(min(time))) over () AS lag_interpolate, + lead(interpolate(min(time))) over () AS lead_interpolate +FROM (VALUES (1),(3)) v(time) +GROUP BY 1; + time_bucket_gapfill | min | lag_min | lead_min | interpolate | lag_interpolate | lead_interpolate 
+---------------------+-----+---------+----------+-------------+-----------------+------------------ + 0 | | | 1 | | | 1 + 1 | 1 | | | 1 | | 2 + 2 | | 1 | 3 | 2 | 1 | 3 + 3 | 3 | | | 3 | 2 | + 4 | | 3 | | | 3 | +(5 rows) + +--test window functions with expressions +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) AS "min", + lag(min(time)) over () AS lag_min, + 1 + lag(min(time)) over () AS lag_min, + interpolate(min(time)) AS interpolate, + lag(interpolate(min(time))) over () AS lag_interpolate, + 1 + lag(interpolate(min(time))) over () AS lag_interpolate +FROM (VALUES (1),(3)) v(time) +GROUP BY 1; + time_bucket_gapfill | min | lag_min | lag_min | interpolate | lag_interpolate | lag_interpolate +---------------------+-----+---------+---------+-------------+-----------------+----------------- + 0 | | | | | | + 1 | 1 | | | 1 | | + 2 | | 1 | 2 | 2 | 1 | 2 + 3 | 3 | | | 3 | 2 | 3 + 4 | | 3 | 4 | | 3 | 4 +(5 rows) + +--test row_number/rank/percent_rank/... window functions with gapfill reference +SELECT + time_bucket_gapfill(1,time,0,5), + ntile(2) OVER () AS ntile_2, + ntile(3) OVER () AS ntile_3, + ntile(5) OVER () AS ntile_5, + row_number() OVER (), + cume_dist() OVER (ORDER BY time_bucket_gapfill(1,time,0,5)), + rank() OVER (), + rank() OVER (ORDER BY time_bucket_gapfill(1,time,0,5)), + percent_rank() OVER (ORDER BY time_bucket_gapfill(1,time,0,5)) +FROM (VALUES (1),(3)) v(time) +GROUP BY 1; + time_bucket_gapfill | ntile_2 | ntile_3 | ntile_5 | row_number | cume_dist | rank | rank | percent_rank +---------------------+---------+---------+---------+------------+-----------+------+------+-------------- + 0 | 1 | 1 | 1 | 1 | 0.2 | 1 | 1 | 0 + 1 | 1 | 1 | 2 | 2 | 0.4 | 1 | 2 | 0.25 + 2 | 1 | 2 | 3 | 3 | 0.6 | 1 | 3 | 0.5 + 3 | 2 | 2 | 4 | 4 | 0.8 | 1 | 4 | 0.75 + 4 | 2 | 3 | 5 | 5 | 1 | 1 | 5 | 1 +(5 rows) + +-- test first_value/last_value/nth_value +SELECT + time_bucket_gapfill(1,time,0,5), + first_value(min(time)) OVER (), + nth_value(min(time),3) OVER (), + last_value(min(time)) OVER () +FROM (VALUES (0),(2),(5)) v(time) +GROUP BY 1; + time_bucket_gapfill | first_value | nth_value | last_value +---------------------+-------------+-----------+------------ + 0 | 0 | 2 | 5 + 1 | 0 | 2 | 5 + 2 | 0 | 2 | 5 + 3 | 0 | 2 | 5 + 4 | 0 | 2 | 5 + 5 | 0 | 2 | 5 +(6 rows) + +-- test window functions with PARTITION BY +SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + row_number() OVER (), + row_number() OVER (PARTITION BY color) +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color ORDER BY 2,1; + time | color | row_number | row_number +------+-------+------------+------------ + 0 | blue | 1 | 1 + 1 | blue | 2 | 2 + 2 | blue | 3 | 3 + 3 | blue | 4 | 4 + 4 | blue | 5 | 5 + 0 | red | 6 | 1 + 1 | red | 7 | 2 + 2 | red | 8 | 3 + 3 | red | 9 | 4 + 4 | red | 10 | 5 +(10 rows) + +-- test multiple windows +\set ON_ERROR_STOP 0 +SELECT + time_bucket_gapfill(1,time,0,11), + first_value(interpolate(min(time))) OVER (ROWS 1 PRECEDING), + interpolate(min(time)), + last_value(interpolate(min(time))) OVER (ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +FROM (VALUES (0),(10)) v(time) +GROUP BY 1; + time_bucket_gapfill | first_value | interpolate | last_value +---------------------+-------------+-------------+------------ + 0 | 0 | 0 | 1 + 1 | 0 | 1 | 2 + 2 | 1 | 2 | 3 + 3 | 2 | 3 | 4 + 4 | 3 | 4 | 5 + 5 | 4 | 5 | 6 + 6 | 5 | 6 | 7 + 7 | 6 | 7 | 8 + 8 | 7 | 8 | 9 + 9 | 8 | 9 | 10 + 10 | 9 | 10 | 10 +(11 rows) + +-- test reorder +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + 
min(value) as m +FROM + (VALUES (1,1,1),(2,2,2)) v(time,id,value) +GROUP BY 1,id ORDER BY 1,id; + time | id | m +------+----+--- + 0 | 1 | + 0 | 2 | + 1 | 1 | 1 + 1 | 2 | + 2 | 1 | + 2 | 2 | 2 + 3 | 1 | + 3 | 2 | + 4 | 1 | + 4 | 2 | +(10 rows) + +-- test order by locf +SELECT + time_bucket_gapfill(1,time,1,6), + locf(min(time)) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 1,2; + time_bucket_gapfill | locf +---------------------+------ + 1 | + 2 | 2 + 3 | 3 + 4 | 3 + 5 | 3 +(5 rows) + +SELECT + time_bucket_gapfill(1,time,1,6), + locf(min(time)) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 2 NULLS FIRST,1; + time_bucket_gapfill | locf +---------------------+------ + 1 | + 2 | 2 + 3 | 3 + 4 | 3 + 5 | 3 +(5 rows) + +SELECT + time_bucket_gapfill(1,time,1,6), + locf(min(time)) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 2 NULLS LAST,1; + time_bucket_gapfill | locf +---------------------+------ + 2 | 2 + 3 | 3 + 4 | 3 + 5 | 3 + 1 | +(5 rows) + +-- test order by interpolate +SELECT + time_bucket_gapfill(1,time,1,6), + interpolate(min(time),prev:=(0,0)::record) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 1,2; + time_bucket_gapfill | interpolate +---------------------+------------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | + 5 | +(5 rows) + +SELECT + time_bucket_gapfill(1,time,1,6), + interpolate(min(time),prev:=(0,0)::record) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 2 NULLS FIRST,1; + time_bucket_gapfill | interpolate +---------------------+------------- + 4 | + 5 | + 1 | 1 + 2 | 2 + 3 | 3 +(5 rows) + +SELECT + time_bucket_gapfill(1,time,1,6), + interpolate(min(time),prev:=(0,0)::record) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 2 NULLS LAST,1; + time_bucket_gapfill | interpolate +---------------------+------------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | + 5 | +(5 rows) + +-- test queries on hypertable +-- test locf and interpolate together +SELECT + time_bucket_gapfill(interval '1h',time,timestamptz '2018-01-01 05:00:00-8', timestamptz '2018-01-01 07:00:00-8'), + device_id, + locf(avg(v1)) AS locf_v1, + locf(min(v2)) AS locf_v2, + interpolate(avg(v1)) AS interpolate_v1, + interpolate(avg(v2)) AS interpolate_v2 +FROM metrics_tstz +GROUP BY 1,2 +ORDER BY 1,2; + time_bucket_gapfill | device_id | locf_v1 | locf_v2 | interpolate_v1 | interpolate_v2 +------------------------------+-----------+---------+---------+----------------+---------------- + Mon Jan 01 05:00:00 2018 PST | 1 | 0.5 | 10 | 0.5 | 10 + Mon Jan 01 05:00:00 2018 PST | 2 | 0.7 | 20 | 0.7 | 20 + Mon Jan 01 05:00:00 2018 PST | 3 | 0.9 | 30 | 0.9 | 30 + Mon Jan 01 06:00:00 2018 PST | 1 | 0.5 | 10 | 0.25 | 5 + Mon Jan 01 06:00:00 2018 PST | 2 | 0.7 | 20 | 1.05 | 30 + Mon Jan 01 06:00:00 2018 PST | 3 | 0.9 | 30 | 0.9 | 30 + Mon Jan 01 07:00:00 2018 PST | 1 | 0 | 0 | 0 | 0 + Mon Jan 01 07:00:00 2018 PST | 2 | 1.4 | 40 | 1.4 | 40 + Mon Jan 01 07:00:00 2018 PST | 3 | 0.9 | 30 | 0.9 | 30 +(9 rows) + +SELECT + time_bucket_gapfill('12h'::interval,time,'2017-01-01'::timestamptz, '2017-01-02'::timestamptz), + interpolate( + avg(v1), + (SELECT ('2017-01-01'::timestamptz,1::float)), + (SELECT ('2017-01-02'::timestamptz,2::float)) + ) +FROM metrics_tstz WHERE time < '2017-01-01' GROUP BY 1; + time_bucket_gapfill | interpolate +------------------------------+------------------- + Sat Dec 31 16:00:00 2016 PST | 0.666666666666667 + Sun Jan 01 04:00:00 2017 PST | 1.16666666666667 + Sun Jan 01 16:00:00 2017 PST | 1.66666666666667 +(3 rows) + +SELECT + time_bucket_gapfill('12h'::interval,time,'2017-01-01'::timestamptz, 
'2017-01-02'::timestamptz), + interpolate( + avg(v1), + (SELECT ('2017-01-01'::timestamptz,1::float)), + (SELECT ('2017-01-02'::timestamptz,2::float)) + ) +FROM metrics_tstz WHERE time_bucket_gapfill('12h'::interval,time,'2017-01-01'::timestamptz, '2017-01-02'::timestamptz) < '2017-01-01' GROUP BY 1; + time_bucket_gapfill | interpolate +------------------------------+------------------- + Sat Dec 31 16:00:00 2016 PST | 0.666666666666667 + Sun Jan 01 04:00:00 2017 PST | 1.16666666666667 + Sun Jan 01 16:00:00 2017 PST | 1.66666666666667 +(3 rows) + +-- interpolation with correlated subquery lookup before interval +SELECT + time_bucket_gapfill('1h'::interval,time,'2018-01-01 3:00 PST'::timestamptz, '2018-01-01 8:00 PST'::timestamptz), + device_id, + interpolate( + avg(v1), + (SELECT (time,0.5::float) FROM metrics_tstz m2 WHERE m1.device_id=m2.device_id ORDER BY time DESC LIMIT 1) + ), + avg(v1) +FROM metrics_tstz m1 +WHERE device_id=1 GROUP BY 1,2 ORDER BY 1,2; + time_bucket_gapfill | device_id | interpolate | avg +------------------------------+-----------+-------------+----- + Mon Jan 01 03:00:00 2018 PST | 1 | 0.5 | + Mon Jan 01 04:00:00 2018 PST | 1 | 0.5 | + Mon Jan 01 05:00:00 2018 PST | 1 | 0.5 | 0.5 + Mon Jan 01 06:00:00 2018 PST | 1 | 0.25 | + Mon Jan 01 07:00:00 2018 PST | 1 | 0 | 0 +(5 rows) + +-- interpolation with correlated subquery lookup after interval +SELECT + time_bucket_gapfill('1h'::interval,time,'2018-01-01 5:00 PST'::timestamptz, '2018-01-01 9:00 PST'::timestamptz), + device_id, + interpolate( + avg(v1), + next=>(SELECT (time,v2::float) FROM metrics_tstz m2 WHERE m1.device_id=m2.device_id ORDER BY time LIMIT 1) + ),avg(v1) +FROM metrics_tstz m1 WHERE device_id=1 GROUP BY 1,2 ORDER BY 1,2; + time_bucket_gapfill | device_id | interpolate | avg +------------------------------+-----------+-------------+----- + Mon Jan 01 05:00:00 2018 PST | 1 | 0.5 | 0.5 + Mon Jan 01 06:00:00 2018 PST | 1 | 0.25 | + Mon Jan 01 07:00:00 2018 PST | 1 | 0 | 0 + Mon Jan 01 08:00:00 2018 PST | 1 | -5 | +(4 rows) + +\set ON_ERROR_STOP 0 +-- bucket_width non simple expression +SELECT + time_bucket_gapfill(t,t) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be a simple expression +-- no start/finish and no usable time constraints +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- NULL start/finish and no usable time constraints +SELECT + time_bucket_gapfill(1,t,NULL,NULL) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- no start and no usable time constraints +SELECT + time_bucket_gapfill(1,t,finish:=1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. 
+-- NULL start expression and no usable time constraints +SELECT + time_bucket_gapfill(1,t,CASE WHEN length(version())>0 THEN NULL::int ELSE NULL::int END,1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. +-- unsupported start expression and no usable time constraints +SELECT + time_bucket_gapfill(1,t,t,1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start must be a simple expression +-- NULL start and no usable time constraints +SELECT + time_bucket_gapfill(1,t,NULL,1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- NULL finish expression and no usable time constraints +SELECT + time_bucket_gapfill(1,t,1,CASE WHEN length(version())>0 THEN NULL::int ELSE NULL::int END) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: finish cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. +-- unsupported finish expression and no usable time constraints +SELECT + time_bucket_gapfill(1,t,1,t) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: finish must be a simple expression +-- no finish and no usable time constraints +SELECT + time_bucket_gapfill(1,t,1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer finish from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- NULL finish and no usable time constraints +SELECT + time_bucket_gapfill(1,t,1,NULL) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer finish from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- expression with column reference on right side +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE t > t AND t < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- expression with cast +SELECT + time_bucket_gapfill(1,t1::int8) +FROM (VALUES (1,2),(2,2)) v(t1,t2) +WHERE t1 >= 1 AND t1 <= 2 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts needs to refer to a single column if no start or finish is supplied +HINT: Specify start and finish as arguments or in the WHERE clause. +-- expression with multiple column references +SELECT + time_bucket_gapfill(1,t1+t2) +FROM (VALUES (1,2),(2,2)) v(t1,t2) +WHERE t1 > 1 AND t1 < 2 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts needs to refer to a single column if no start or finish is supplied +HINT: Specify start and finish as arguments or in the WHERE clause. +-- expression with NULL start in WHERE clause, we use CASE to wrap the NULL so it doesnt get folded +SELECT + time_bucket_gapfill(1,t1) +FROM (VALUES (1,2),(2,2)) v(t1,t2) +WHERE t1 > CASE WHEN length(version()) > 0 THEN NULL::int ELSE NULL::int END AND t1 < 4 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. 
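The errors recorded above all come from the same rule: time_bucket_gapfill needs a usable, non-NULL start and finish, supplied either as explicit arguments or as constraints the planner can infer from the WHERE clause. A minimal sketch of both working forms, reusing the same inline VALUES data as the failing cases (illustrative only, not part of the recorded regression output):

SELECT time_bucket_gapfill(1, t, start => 0, finish => 5) AS time, min(t)
FROM (VALUES (1),(2)) v(t)
GROUP BY 1 ORDER BY 1;

SELECT time_bucket_gapfill(1, t) AS time, min(t)
FROM (VALUES (1),(2)) v(t)
WHERE t >= 0 AND t < 5   -- start/finish inferred from these simple constraints
GROUP BY 1 ORDER BY 1;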
+-- expression with NULL finish in WHERE clause, we use CASE to wrap the NULL so it doesnt get folded +SELECT + time_bucket_gapfill(1,t1) +FROM (VALUES (1,2),(2,2)) v(t1,t2) +WHERE t1 > 0 AND t1 < CASE WHEN length(version()) > 0 THEN NULL::int ELSE NULL::int END +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: finish cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. +-- non-Const NULL as start argument, we use CASE to wrap the NULL so it doesnt get folded +SELECT + time_bucket_gapfill(1,t1,CASE WHEN length(version())>0 THEN NULL::int ELSE NULL::int END) +FROM (VALUES (1,2),(2,2)) v(t1,t2) +WHERE t1 > 0 AND t1 < 2 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. +-- non-Const NULL as finish argument, we use CASE to wrap the NULL so it doesnt get folded +SELECT + time_bucket_gapfill(1,t1,NULL,CASE WHEN length(version())>0 THEN NULL::int ELSE NULL::int END) +FROM (VALUES (1,2),(2,2)) v(t1,t2) +WHERE t1 > 0 AND t1 < 2 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: finish cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. +-- time_bucket_gapfill with constraints ORed +SELECT + time_bucket_gapfill(1::int8,t::int8) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= -1 OR t < 3 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts needs to refer to a single column if no start or finish is supplied +HINT: Specify start and finish as arguments or in the WHERE clause. +\set ON_ERROR_STOP 1 +-- int32 time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= -1 AND t < 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 +(4 rows) + +-- same query with less or equal as finish +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= -1 AND t <= 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 + 3 +(5 rows) + +-- int32 time_bucket_gapfill with start column and value switched +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + -1 < t AND t < 3 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 +(3 rows) + +-- int32 time_bucket_gapfill with finish column and value switched +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= 0 AND 3 >= t +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 + 3 +(4 rows) + +-- int16 time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill(1::int2,t) +FROM (VALUES (1::int2),(2::int2)) v(t) +WHERE + t >= -1 AND t < 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 +(4 rows) + +-- int64 time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill(1::int8,t) +FROM (VALUES (1::int8),(2::int8)) v(t) +WHERE + t >= -1 AND t < 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 +(4 rows) + +-- date time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill('1d'::interval,t) +FROM (VALUES ('1999-12-30'::date),('2000-01-01'::date)) v(t) +WHERE + t >= '1999-12-29' AND t < '2000-01-03' +GROUP BY 1; + time_bucket_gapfill + 12-29-1999 + 12-30-1999 + 12-31-1999 + 01-01-2000 + 01-02-2000 +(5 rows) + +-- timestamp time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill('12h'::interval,t) +FROM (VALUES ('1999-12-30'::timestamp),('2000-01-01'::timestamp)) v(t) +WHERE + t >= '1999-12-29' AND t < '2000-01-03' +GROUP BY 1; + time_bucket_gapfill + Wed Dec 29 00:00:00 1999 + Wed Dec 29 12:00:00 1999 + Thu Dec 30 00:00:00 1999 + Thu Dec 30 
12:00:00 1999 + Fri Dec 31 00:00:00 1999 + Fri Dec 31 12:00:00 1999 + Sat Jan 01 00:00:00 2000 + Sat Jan 01 12:00:00 2000 + Sun Jan 02 00:00:00 2000 + Sun Jan 02 12:00:00 2000 +(10 rows) + +-- timestamptz time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill('12h'::interval,t) +FROM (VALUES ('1999-12-30'::timestamptz),('2000-01-01'::timestamptz)) v(t) +WHERE + t >= '1999-12-29' AND t < '2000-01-03' +GROUP BY 1; + time_bucket_gapfill + Tue Dec 28 16:00:00 1999 PST + Wed Dec 29 04:00:00 1999 PST + Wed Dec 29 16:00:00 1999 PST + Thu Dec 30 04:00:00 1999 PST + Thu Dec 30 16:00:00 1999 PST + Fri Dec 31 04:00:00 1999 PST + Fri Dec 31 16:00:00 1999 PST + Sat Jan 01 04:00:00 2000 PST + Sat Jan 01 16:00:00 2000 PST + Sun Jan 02 04:00:00 2000 PST + Sun Jan 02 16:00:00 2000 PST +(11 rows) + +-- timestamptz time_bucket_gapfill with more complex expression +SELECT + time_bucket_gapfill('12h'::interval,t) +FROM (VALUES ('1999-12-30'::timestamptz),('2000-01-01'::timestamptz)) v(t) +WHERE + t >= '2000-01-03'::timestamptz - '4d'::interval AND t < '2000-01-03' +GROUP BY 1; + time_bucket_gapfill + Wed Dec 29 16:00:00 1999 PST + Thu Dec 30 04:00:00 1999 PST + Thu Dec 30 16:00:00 1999 PST + Fri Dec 31 04:00:00 1999 PST + Fri Dec 31 16:00:00 1999 PST + Sat Jan 01 04:00:00 2000 PST + Sat Jan 01 16:00:00 2000 PST + Sun Jan 02 04:00:00 2000 PST + Sun Jan 02 16:00:00 2000 PST +(9 rows) + +-- timestamptz time_bucket_gapfill with different datatype in finish constraint +SELECT + time_bucket_gapfill('12h'::interval,t) +FROM (VALUES ('1999-12-30'::timestamptz),('2000-01-01'::timestamptz)) v(t) +WHERE + t >= '2000-01-03'::timestamptz - '4d'::interval AND t < '2000-01-03'::date +GROUP BY 1; + time_bucket_gapfill + Wed Dec 29 16:00:00 1999 PST + Thu Dec 30 04:00:00 1999 PST + Thu Dec 30 16:00:00 1999 PST + Fri Dec 31 04:00:00 1999 PST + Fri Dec 31 16:00:00 1999 PST + Sat Jan 01 04:00:00 2000 PST + Sat Jan 01 16:00:00 2000 PST + Sun Jan 02 04:00:00 2000 PST + Sun Jan 02 16:00:00 2000 PST +(9 rows) + +-- time_bucket_gapfill with now() as start +SELECT + time_bucket_gapfill('1h'::interval,t) +FROM (VALUES (now()),(now())) v(t) +WHERE + t >= now() AND t < now() - '1h'::interval +GROUP BY 1; + time_bucket_gapfill +(0 rows) + +-- time_bucket_gapfill with multiple constraints +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= -1 AND t < 3 and t>1 AND t <=4 AND length(version()) > 0 +GROUP BY 1; + time_bucket_gapfill + 2 +(1 row) + +-- int32 time_bucket_gapfill with greater for start +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t > -2 AND t < 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 +(4 rows) + +-- test DISTINCT +SELECT DISTINCT ON (color) + time_bucket_gapfill(1,time,0,5) as time, + color, + min(value) as m +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color ORDER BY 2,1; + time | color | m +------+-------+--- + 0 | blue | + 0 | red | +(2 rows) + +-- test DISTINCT with window functions +SELECT DISTINCT ON (row_number() OVER ()) + time_bucket_gapfill(1,time,0,5) as time, + color, + row_number() OVER () +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color; + time | color | row_number +------+-------+------------ + 0 | blue | 1 + 1 | blue | 2 + 2 | blue | 3 + 3 | blue | 4 + 4 | blue | 5 + 0 | red | 6 + 1 | red | 7 + 2 | red | 8 + 3 | red | 9 + 4 | red | 10 +(10 rows) + +-- test DISTINCT with window functions and PARTITION BY +SELECT DISTINCT ON (color,row_number() OVER (PARTITION BY color)) + 
time_bucket_gapfill(1,time,0,5) as time, + color, + row_number() OVER (PARTITION BY color) +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color; + time | color | row_number +------+-------+------------ + 0 | blue | 1 + 1 | blue | 2 + 2 | blue | 3 + 3 | blue | 4 + 4 | blue | 5 + 0 | red | 1 + 1 | red | 2 + 2 | red | 3 + 3 | red | 4 + 4 | red | 5 +(10 rows) + +-- test DISTINCT with window functions not in targetlist +SELECT DISTINCT ON (row_number() OVER ()) + time_bucket_gapfill(1,time,0,5) as time, + color, + row_number() OVER (PARTITION BY color) +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color; + time | color | row_number +------+-------+------------ + 0 | blue | 1 + 1 | blue | 2 + 2 | blue | 3 + 3 | blue | 4 + 4 | blue | 5 + 0 | red | 1 + 1 | red | 2 + 2 | red | 3 + 3 | red | 4 + 4 | red | 5 +(10 rows) + +-- test column references +SELECT + row_number() OVER (PARTITION BY color), + locf(min(time)), + color, + time_bucket_gapfill(1,time,0,5) as time +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 3,4; + row_number | locf | color | time +------------+------+-------+------ + 1 | | blue | 0 + 2 | 1 | blue | 1 + 3 | 1 | blue | 2 + 4 | 1 | blue | 3 + 5 | 1 | blue | 4 + 1 | | red | 0 + 2 | | red | 1 + 3 | 2 | red | 2 + 4 | 2 | red | 3 + 5 | 2 | red | 4 +(10 rows) + +-- test with Nested Loop +SELECT l.id, bucket, data_value FROM + (VALUES (1), (2), (3), (4)) a(id) + INNER JOIN LATERAL ( + SELECT b.id id, time_bucket_gapfill('1'::int, time, start=>'1'::int, finish=> '5'::int) bucket, locf(last(data, time)) data_value + FROM (VALUES (1, 1, 1), (1, 4, 4), (2, 1, -1), (2, 4, -4)) b(id, time, data) + WHERE a.id = b.id + GROUP BY b.id, bucket + ) as l on (true); + id | bucket | data_value +----+--------+------------ + 1 | 1 | 1 + 1 | 2 | 1 + 1 | 3 | 1 + 1 | 4 | 4 + 2 | 1 | -1 + 2 | 2 | -1 + 2 | 3 | -1 + 2 | 4 | -4 +(8 rows) + +-- test prepared statement +PREPARE prep_gapfill AS +SELECT + time_bucket_gapfill(1,time,0,5) as time, + locf(min(value)) +FROM (VALUES (1,1),(2,2)) v(time,value) +GROUP BY 1; +-- execute 10 times to make sure turning it into generic plan works +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +DEALLOCATE prep_gapfill; +-- test column references with TIME_COLUMN last +SELECT + row_number() OVER (PARTITION BY color), + locf(min(time)), + color, + time_bucket_gapfill(1,time,0,5) as time +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 3,4; + row_number | locf | color | time +------------+------+-------+------ + 1 | | blue | 0 + 2 | 1 
| blue | 1 + 3 | 1 | blue | 2 + 4 | 1 | blue | 3 + 5 | 1 | blue | 4 + 1 | | red | 0 + 2 | | red | 1 + 3 | 2 | red | 2 + 4 | 2 | red | 3 + 5 | 2 | red | 4 +(10 rows) + +-- test expressions on GROUP BY columns +SELECT + row_number() OVER (PARTITION BY color), + locf(min(time)), + color, + length(color), + time_bucket_gapfill(1,time,0,5) as time +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 3,5; + row_number | locf | color | length | time +------------+------+-------+--------+------ + 1 | | blue | 4 | 0 + 2 | 1 | blue | 4 | 1 + 3 | 1 | blue | 4 | 2 + 4 | 1 | blue | 4 | 3 + 5 | 1 | blue | 4 | 4 + 1 | | red | 3 | 0 + 2 | | red | 3 | 1 + 3 | 2 | red | 3 | 2 + 4 | 2 | red | 3 | 3 + 5 | 2 | red | 3 | 4 +(10 rows) + +-- test columns derived from GROUP BY columns with cast +SELECT + time_bucket_gapfill(1,time,0,5) as time, + device_id::text +FROM (VALUES (1,1),(2,2)) v(time,device_id) +GROUP BY 1,device_id; + time | device_id +------+----------- + 0 | 1 + 1 | 1 + 2 | 1 + 3 | 1 + 4 | 1 + 0 | 2 + 1 | 2 + 2 | 2 + 3 | 2 + 4 | 2 +(10 rows) + +-- test columns derived from GROUP BY columns with expression +SELECT + time_bucket_gapfill(1,time,0,5) as time, + 'Device ' || device_id::text +FROM (VALUES (1,1),(2,2)) v(time,device_id) +GROUP BY 1,device_id; + time | ?column? +------+---------- + 0 | Device 1 + 1 | Device 1 + 2 | Device 1 + 3 | Device 1 + 4 | Device 1 + 0 | Device 2 + 1 | Device 2 + 2 | Device 2 + 3 | Device 2 + 4 | Device 2 +(10 rows) + +--test interpolation with big differences in values (test overflows in calculations) +--we use the biggest possible difference in time(x) and the value(y). +--For bigints we also test values of smaller than bigintmax/min to avoid +--the symmetry where x=y (which catches more errors) +SELECT 9223372036854775807 as big_int_max \gset +SELECT -9223372036854775808 as big_int_min \gset +SELECT + time_bucket_gapfill(1,time,0,1) AS time, + interpolate(min(s)) AS "smallint", + interpolate(min(i)) AS "int", + interpolate(min(b)) AS "bigint", + interpolate(min(b2)) AS "bigint2", + interpolate(min(d)) AS "double" +FROM (values (:big_int_min,(-32768)::smallint,(-2147483648)::int,:big_int_min,-2147483648::bigint, '-Infinity'::double precision), + (:big_int_max, 32767::smallint, 2147483647::int,:big_int_max, 2147483647::bigint, 'Infinity'::double precision)) v(time,s,i,b,b2,d) +GROUP BY 1 ORDER BY 1; + time | smallint | int | bigint | bigint2 | double +----------------------+----------+-------------+----------------------+-------------+----------- + -9223372036854775808 | -32768 | -2147483648 | -9223372036854775808 | -2147483648 | -Infinity + 0 | 0 | 0 | 0 | 0 | Infinity + 9223372036854775807 | 32767 | 2147483647 | 9223372036854775807 | 2147483647 | Infinity +(3 rows) + +-- issue #2232: This query used to trigger error "could not find +-- pathkey item to sort" due to a corrupt query plan +SELECT time_bucket_gapfill('1 h', time) AS time, + locf(sum(v1)) AS v1_sum, + interpolate(sum(v2)) AS v2_sum +FROM metrics_tstz +WHERE time >= '2018-01-01 04:00' AND time < '2018-01-01 08:00' +GROUP BY 1 +ORDER BY 1 DESC; + time | v1_sum | v2_sum +------------------------------+--------+-------- + Mon Jan 01 07:00:00 2018 PST | 2.3 | 70 + Mon Jan 01 06:00:00 2018 PST | 2.1 | 65 + Mon Jan 01 05:00:00 2018 PST | 2.1 | 60 + Mon Jan 01 04:00:00 2018 PST | | +(4 rows) + +-- query without gapfill: +SELECT time_bucket('1 h', time) AS time, + sum(v1) AS v1_sum, + sum(v2) AS v1_sum +FROM metrics_tstz +WHERE time >= '2018-01-01 04:00' AND time < '2018-01-01 08:00' +GROUP BY 1 
+ORDER BY 1 DESC; + time | v1_sum | v1_sum +------------------------------+--------+-------- + Mon Jan 01 07:00:00 2018 PST | 2.3 | 70 + Mon Jan 01 05:00:00 2018 PST | 2.1 | 60 +(2 rows) + +-- query to show original data +SELECT * FROM metrics_tstz +WHERE time >= '2018-01-01 04:00' AND time < '2018-01-01 08:00' +ORDER BY 1 DESC, 2; + time | device_id | v1 | v2 +------------------------------+-----------+-----+---- + Mon Jan 01 07:00:00 2018 PST | 1 | 0 | 0 + Mon Jan 01 07:00:00 2018 PST | 2 | 1.4 | 40 + Mon Jan 01 07:00:00 2018 PST | 3 | 0.9 | 30 + Mon Jan 01 05:00:00 2018 PST | 1 | 0.5 | 10 + Mon Jan 01 05:00:00 2018 PST | 2 | 0.7 | 20 + Mon Jan 01 05:00:00 2018 PST | 3 | 0.9 | 30 +(6 rows) + +-- issue #3048 +-- test gapfill/hashagg planner interaction +-- this used to produce a plan without gapfill node +EXPLAIN (costs off) SELECT time_bucket_gapfill('52w', time, start:='2000-01-01', finish:='2000-01-10') AS time, + sum(v1) AS v1_sum +FROM metrics +GROUP BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Sort + Sort Key: (time_bucket_gapfill('@ 364 days'::interval, _hyper_X_X_chunk."time", 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone, 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone)) + -> HashAggregate + Group Key: time_bucket_gapfill('@ 364 days'::interval, _hyper_X_X_chunk."time", 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone, 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(10 rows) + +-- issue #3834 +-- test projection handling in gapfill +CREATE TABLE i3834(time timestamptz NOT NULL, ship_id int, value float); +SELECT table_name FROM create_hypertable('i3834','time'); + table_name + i3834 +(1 row) + +INSERT INTO i3834 VALUES ('2020-12-01 14:05:00+01',1,3.123), ('2020-12-01 14:05:00+01',2,4.123), ('2020-12-01 14:05:00+01',3,5.123); +SELECT + time_bucket_gapfill('30000 ms'::interval, time) AS time, + ship_id, + interpolate (avg(value)), + 'speedlog' AS source +FROM + i3834 +WHERE + ship_id IN (1, 2) + AND time >= '2020-12-01 14:05:00+01' + AND time < '2020-12-01 14:10:00+01' +GROUP BY 1,2; + time | ship_id | interpolate | source +------------------------------+---------+-------------+---------- + Tue Dec 01 05:05:00 2020 PST | 1 | 3.123 | speedlog + Tue Dec 01 05:05:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:06:00 2020 PST | 1 | | speedlog + Tue Dec 01 05:06:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:07:00 2020 PST | 1 | | speedlog + Tue Dec 01 05:07:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:08:00 2020 PST | 1 | | speedlog + Tue Dec 01 05:08:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:09:00 2020 PST | 1 | | speedlog + Tue Dec 01 05:09:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:05:00 2020 PST | 2 | 4.123 | speedlog + Tue Dec 01 05:05:30 2020 PST | 2 | | speedlog + Tue Dec 01 05:06:00 2020 PST | 2 | | speedlog + Tue Dec 01 05:06:30 2020 PST | 2 | | speedlog + Tue Dec 01 05:07:00 2020 PST | 2 | | speedlog + Tue Dec 01 05:07:30 2020 PST | 2 | | speedlog + Tue Dec 01 05:08:00 2020 PST | 2 | | speedlog + Tue Dec 01 05:08:30 2020 PST | 2 | | speedlog + Tue Dec 01 05:09:00 2020 PST | 2 | | speedlog + Tue Dec 01 05:09:30 2020 PST | 2 | | speedlog +(20 rows) + +DROP TABLE i3834; +-- issue #1528 +-- test float rounding for certain float values when start and end are identical +SELECT + time_bucket_gapfill('1min'::interval, ts::timestamptz, start:='2019-11-05 2:20', finish:='2019-11-05 2:30'), + 
interpolate(avg(20266.959547::float4)) AS float4, + interpolate(avg(20266.959547::float8)) AS float8 +FROM (VALUES ('2019-11-05 2:20'), ('2019-11-05 2:30')) v (ts) +GROUP BY 1; + time_bucket_gapfill | float4 | float8 +------------------------------+-----------------+-------------- + Tue Nov 05 02:20:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:21:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:22:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:23:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:24:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:25:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:26:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:27:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:28:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:29:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:30:00 2019 PST | 20266.958984375 | 20266.959547 +(11 rows) + +-- check gapfill group change detection with TOASTed values +CREATE TABLE gapfill_group_toast(time timestamptz NOT NULL, device text, value float); +SELECT table_name FROM create_hypertable('gapfill_group_toast', 'time'); + table_name + gapfill_group_toast +(1 row) + +INSERT INTO gapfill_group_toast +SELECT + generate_series('2022-06-01'::timestamptz, '2022-06-03'::timestamptz, '1min'::interval), + '4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43', + random(); +ALTER TABLE gapfill_group_toast SET(timescaledb.compress, timescaledb.compress_segmentby = 'device'); +SELECT count(compress_chunk(c)) FROM show_chunks('gapfill_group_toast') c; + count + 2 +(1 row) + +SELECT + time_bucket_gapfill('1 day', time), device +FROM gapfill_group_toast +WHERE time >= '2022-06-01' AND time <= '2022-06-02' +GROUP BY 1,2; + time_bucket_gapfill | device +------------------------------+------------------------------------------------------------------ + Tue May 31 17:00:00 2022 PDT | 4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43 + Wed Jun 01 17:00:00 2022 PDT | 4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43 +(2 rows) + +DROP TABLE gapfill_group_toast; +-- test bucketing by month +SELECT time_bucket_gapfill('2 month'::interval, ts, '2000-01-01'::timestamptz,'2001-01-01'::timestamptz) FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Fri Dec 31 16:00:00 1999 PST + Tue Feb 29 16:00:00 2000 PST + Sun Apr 30 17:00:00 2000 PDT + Fri Jun 30 17:00:00 2000 PDT + Thu Aug 31 17:00:00 2000 PDT + Tue Oct 31 16:00:00 2000 PST + Sun Dec 31 16:00:00 2000 PST +(7 rows) + +SELECT time_bucket_gapfill('1 year'::interval, ts, '2000-01-01'::timestamptz,'2003-01-01'::timestamptz) FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Fri Dec 31 16:00:00 1999 PST + Sun Dec 31 16:00:00 2000 PST + Mon Dec 31 16:00:00 2001 PST + Tue Dec 31 16:00:00 2002 PST +(4 rows) + +SELECT time_bucket_gapfill('1 century'::interval, ts, '1900-01-01'::timestamptz,'2103-01-01'::timestamptz) FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Sun Dec 31 16:00:00 1899 PST + Fri Dec 31 16:00:00 1999 PST + Thu Dec 31 16:00:00 2099 PST +(3 rows) + +-- test bucketing with timezone +SELECT time_bucket_gapfill('2 month'::interval, ts, 'Europe/Berlin', '2000-01-01','2001-01-01') FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Fri Dec 31 15:00:00 1999 PST + Tue Feb 29 15:00:00 2000 PST + Sat Apr 29 
15:00:00 2000 PDT + Thu Jun 29 15:00:00 2000 PDT + Tue Aug 29 15:00:00 2000 PDT + Sun Oct 29 15:00:00 2000 PST + Fri Dec 29 15:00:00 2000 PST +(7 rows) + +SELECT time_bucket_gapfill('2 month'::interval, ts, current_setting('timezone'), '2000-01-01','2001-01-01') FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Sat Jan 01 00:00:00 2000 PST + Wed Mar 01 00:00:00 2000 PST + Mon May 01 00:00:00 2000 PDT + Sat Jul 01 00:00:00 2000 PDT + Fri Sep 01 00:00:00 2000 PDT + Wed Nov 01 00:00:00 2000 PST +(6 rows) + +SELECT time_bucket_gapfill('2 month'::interval, ts, 'UTC', '2000-01-01','2001-01-01') FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Fri Dec 31 16:00:00 1999 PST + Tue Feb 29 16:00:00 2000 PST + Sat Apr 29 16:00:00 2000 PDT + Thu Jun 29 16:00:00 2000 PDT + Tue Aug 29 16:00:00 2000 PDT + Sun Oct 29 16:00:00 2000 PST + Fri Dec 29 16:00:00 2000 PST +(7 rows) + +SET timezone TO 'Europe/Berlin'; +SELECT time_bucket_gapfill('2 month'::interval, ts, 'Europe/Berlin', '2000-01-01','2001-01-01') FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Sat Jan 01 00:00:00 2000 CET + Wed Mar 01 00:00:00 2000 CET + Mon May 01 00:00:00 2000 CEST + Sat Jul 01 00:00:00 2000 CEST + Fri Sep 01 00:00:00 2000 CEST + Wed Nov 01 00:00:00 2000 CET +(6 rows) + +RESET timezone; +DROP INDEX gapfill_plan_test_indx; +-- Test gapfill with arrays (#5981) +SELECT time_bucket_gapfill(5, ts, 1, 100) as ts, int_arr, locf(last(value, ts)) +FROM ( + SELECT ARRAY[1,2,3,4]::int[] as int_arr, x as ts, x+500000 as value + FROM generate_series(1, 10, 100) as x + ) t +GROUP BY 1, 2 + ts | int_arr | locf +----+-----------+-------- + 0 | {1,2,3,4} | 500001 + 5 | {1,2,3,4} | 500001 + 10 | {1,2,3,4} | 500001 + 15 | {1,2,3,4} | 500001 + 20 | {1,2,3,4} | 500001 + 25 | {1,2,3,4} | 500001 + 30 | {1,2,3,4} | 500001 + 35 | {1,2,3,4} | 500001 + 40 | {1,2,3,4} | 500001 + 45 | {1,2,3,4} | 500001 + 50 | {1,2,3,4} | 500001 + 55 | {1,2,3,4} | 500001 + 60 | {1,2,3,4} | 500001 + 65 | {1,2,3,4} | 500001 + 70 | {1,2,3,4} | 500001 + 75 | {1,2,3,4} | 500001 + 80 | {1,2,3,4} | 500001 + 85 | {1,2,3,4} | 500001 + 90 | {1,2,3,4} | 500001 + 95 | {1,2,3,4} | 500001 +(20 rows) + diff --git a/tsl/test/shared/expected/gapfill-16.out b/tsl/test/shared/expected/gapfill-16.out new file mode 100644 index 00000000000..4ba9c41a2bf --- /dev/null +++ b/tsl/test/shared/expected/gapfill-16.out @@ -0,0 +1,3368 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+\set EXPLAIN 'EXPLAIN (COSTS OFF)' +-- we want to see error details in the output +\set VERBOSITY default +CREATE TABLE gapfill_plan_test(time timestamptz NOT NULL, value float); +SELECT table_name FROM create_hypertable('gapfill_plan_test','time',chunk_time_interval=>'4 weeks'::interval); + table_name + gapfill_plan_test +(1 row) + +INSERT INTO gapfill_plan_test SELECT generate_series('2018-01-01'::timestamptz,'2018-04-01'::timestamptz,'1m'::interval), 1.0; +-- simple example +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Values Scan on "*VALUES*" +(6 rows) + +-- test sorting +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1 +ORDER BY 2; +QUERY PLAN + Sort + Sort Key: (avg("*VALUES*".column2)) + -> Custom Scan (GapFill) + -> GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Values Scan on "*VALUES*" +(8 rows) + +-- test sort direction +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1 +ORDER BY 1 DESC; +QUERY PLAN + Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) DESC + -> Custom Scan (GapFill) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) NULLS FIRST + -> HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now()) + -> Values Scan on "*VALUES*" +(8 rows) + +-- test order by aggregate function +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1 +ORDER BY 2,1; +QUERY PLAN + Sort + Sort Key: (avg("*VALUES*".column2)), (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Custom Scan (GapFill) + -> GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Values Scan on "*VALUES*" +(8 rows) + +-- test query without order by +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,now(),now()), + avg(c2) +FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2) +GROUP BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1, now(), now())) + -> Values Scan on "*VALUES*" +(6 rows) + +-- test parallel query +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + avg(value) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Finalize GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 
PST'::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Partial HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Parallel Append + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk +(15 rows) + +-- test parallel query with locf +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + locf(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Finalize GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Partial HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Parallel Append + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk +(15 rows) + +-- test parallel query with interpolate +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + interpolate(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Finalize GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Partial HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Parallel Append + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk +(15 rows) + +-- make sure we can run gapfill in parallel workers +-- ensure this plan runs in parallel +:EXPLAIN +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + interpolate(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 2 +LIMIT 1; +QUERY PLAN + Limit + -> Sort + Sort Key: (interpolate(avg(gapfill_plan_test.value), NULL::record, NULL::record)) + -> Custom Scan (GapFill) + -> Finalize GroupAggregate + Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 
PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> Partial HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Parallel Append + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk + -> Parallel Seq Scan on _hyper_X_X_chunk +(18 rows) + +-- actually run a parallel gapfill +SELECT + time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), + interpolate(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 2 +LIMIT 1; + time_bucket_gapfill | interpolate +------------------------------+------------- + Mon Jan 01 00:00:00 2018 PST | 1 +(1 row) + +-- test sort optimizations +-- test sort optimization with single member order by, +-- should use index scan (no GapFill node for this one since we're not gapfilling) +:EXPLAIN SELECT time_bucket_gapfill('5m',time),value +FROM gapfill_plan_test +ORDER BY 1; +QUERY PLAN + Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(8 rows) + +SET max_parallel_workers_per_gather TO 0; +-- test sort optimizations with locf +:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), locf(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(11 rows) + +-- test sort optimizations with interpolate +:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), interpolate(avg(value)) +FROM gapfill_plan_test +GROUP BY 1 +ORDER BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)) + -> HashAggregate + Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(11 rows) + +RESET max_parallel_workers_per_gather; +CREATE INDEX gapfill_plan_test_indx ON gapfill_plan_test(value, time); +-- test sort optimization with ordering by multiple 
columns and time_bucket_gapfill not last, +-- must not use index scan +:EXPLAIN SELECT time_bucket_gapfill('5m',time),value +FROM gapfill_plan_test +ORDER BY 1,2; +QUERY PLAN + Sort + Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)), _hyper_X_X_chunk.value + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(8 rows) + +-- test sort optimization with ordering by multiple columns and time_bucket as last member, +-- should use index scan +:EXPLAIN SELECT time_bucket_gapfill('5m',time),value +FROM gapfill_plan_test +ORDER BY 2,1; +QUERY PLAN + Incremental Sort + Sort Key: _hyper_X_X_chunk.value, (time_bucket_gapfill('@ 5 mins'::interval, _hyper_X_X_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)) + Presorted Key: _hyper_X_X_chunk.value + -> Result + -> Merge Append + Sort Key: _hyper_X_X_chunk.value + -> Index Only Scan using _hyper_X_X_chunk_gapfill_plan_test_indx on _hyper_X_X_chunk + -> Index Only Scan using _hyper_X_X_chunk_gapfill_plan_test_indx on _hyper_X_X_chunk + -> Index Only Scan using _hyper_X_X_chunk_gapfill_plan_test_indx on _hyper_X_X_chunk + -> Index Only Scan using _hyper_X_X_chunk_gapfill_plan_test_indx on _hyper_X_X_chunk +(10 rows) + +\set METRICS metrics_int +-- All test against table :METRICS first +\set ON_ERROR_STOP 0 +-- inverse of previous test query to confirm an error is actually thrown +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3 +FROM :METRICS m1 +WHERE time = 5 +GROUP BY 1,2,3 ORDER BY 2,3,1; +ERROR: division by zero +-- test window functions with multiple column references +SELECT + time_bucket_gapfill(1,time,1,2), + first(min(time),min(time)) OVER () +FROM :METRICS +GROUP BY 1; +ERROR: window functions with multiple column references not supported +-- test with unsupported operator +SELECT + time_bucket_gapfill(1,time) +FROM :METRICS +WHERE time =0 AND time < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- test with 2 tables and where clause doesnt match gapfill argument +SELECT + time_bucket_gapfill(1,m2.time) +FROM :METRICS m, :METRICS m2 +WHERE m.time >=0 AND m.time < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- test inner join and where clause doesnt match gapfill argument +SELECT + time_bucket_gapfill(1,m2.time) +FROM :METRICS m1 INNER JOIN :METRICS m2 ON m1.time=m2.time +WHERE m1.time >=0 AND m1.time < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- test outer join with constraints in join condition +-- not usable as start/stop +SELECT + time_bucket_gapfill(1,m1.time) +FROM :METRICS m1 LEFT OUTER JOIN :METRICS m2 ON m1.time=m2.time AND m1.time >=0 AND m1.time < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. 
+\set ON_ERROR_STOP 1 +\ir include/gapfill_metrics_query.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- test locf lookup query does not trigger when not needed +-- 1/(SELECT 0) will throw an error in the lookup query but in order to not +-- always trigger evaluation it needs to be correlated otherwise postgres will +-- always run it once even if the value is never used +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3 +FROM :METRICS m1 +WHERE time >= 0 AND time < 5 +GROUP BY 1,2,3 ORDER BY 2,3,1; + time | device_id | sensor_id | locf3 +------+-----------+-----------+------- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +-- test locf with correlated subquery +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + avg(value), + locf(min(value)) AS locf, + locf(min(value)::int,23) AS locf1, + locf(min(value)::int,(SELECT 42)) AS locf2, + locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3 +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 2,3,1; + time | device_id | sensor_id | avg | locf | locf1 | locf2 | locf3 +------+-----------+-----------+-----+------+-------+-------+------- + 0 | 1 | 1 | 5 | 5 | 5 | 5 | 5 + 5 | 1 | 1 | | 5 | 5 | 5 | 5 + 10 | 1 | 1 | | 5 | 5 | 5 | 5 + 0 | 1 | 2 | | | 23 | 42 | -100 + 5 | 1 | 2 | 10 | 10 | 10 | 10 | 10 + 10 | 1 | 2 | | 10 | 10 | 10 | 10 +(6 rows) + +-- test locf with correlated subquery and "wrong order" +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + avg(value), + locf(min(value)) AS locf, + locf(min(value),23::float) AS locf1, + locf(min(value),(SELECT 42::float)) AS locf2, + locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3 +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 1,2,3; + time | device_id | sensor_id | avg | locf | locf1 | locf2 | locf3 +------+-----------+-----------+-----+------+-------+-------+------- + 0 | 1 | 1 | 5 | 5 | 5 | 5 | 5 + 0 | 1 | 2 | | | 23 | 42 | -100 + 5 | 1 | 1 | | 5 | 5 | 5 | 5 + 5 | 1 | 2 | 10 | 10 | 10 | 10 | 10 + 10 | 1 | 1 | | 5 | 5 | 5 | 5 + 10 | 1 | 2 | | 10 | 10 | 10 | 10 +(6 rows) + +-- test locf with correlated subquery and window functions +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)), + sum(locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1))) OVER (PARTITION BY device_id, sensor_id ROWS 1 PRECEDING) +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3; + time | device_id | sensor_id | locf | sum +------+-----------+-----------+------+------ + 0 | 1 | 1 | 5 | 5 + 5 | 1 | 1 | 5 | 10 + 10 | 1 | 1 | 5 | 10 + 0 | 1 | 2 | -100 | -100 + 5 | 1 | 2 | 10 | -90 + 10 | 1 | 2 | 10 | 20 +(6 rows) + +-- test JOINs +SELECT + time_bucket_gapfill(1,time,0,5) as time, + device_id, + 
d.name, + sensor_id, + s.name, + avg(m.value) +FROM :METRICS m +INNER JOIN devices d USING(device_id) +INNER JOIN sensors s USING(sensor_id) +WHERE time BETWEEN 0 AND 5 +GROUP BY 1,2,3,4,5; + time | device_id | name | sensor_id | name | avg +------+-----------+----------+-----------+----------+----- + 0 | 1 | Device 1 | 1 | Sensor 1 | 5 + 1 | 1 | Device 1 | 1 | Sensor 1 | + 2 | 1 | Device 1 | 1 | Sensor 1 | + 3 | 1 | Device 1 | 1 | Sensor 1 | + 4 | 1 | Device 1 | 1 | Sensor 1 | + 0 | 1 | Device 1 | 2 | Sensor 2 | + 1 | 1 | Device 1 | 2 | Sensor 2 | + 2 | 1 | Device 1 | 2 | Sensor 2 | + 3 | 1 | Device 1 | 2 | Sensor 2 | + 4 | 1 | Device 1 | 2 | Sensor 2 | + 5 | 1 | Device 1 | 2 | Sensor 2 | 10 +(11 rows) + +-- test interpolate with correlated subquery +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + avg(value), + interpolate(min(value)) AS ip, + interpolate(min(value),(-5,-5.0::float),(15,20.0::float)) AS ip1, + interpolate(min(value),(SELECT (-10,-10.0::float)),(SELECT (15,20.0::float))) AS ip2, + interpolate( + min(value), + (SELECT (time,value) FROM :METRICS m2 + WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time DESC LIMIT 1), + (SELECT (time,value) FROM :METRICS m2 + WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time LIMIT 1) + ) AS ip3 +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 2,3,1; + time | device_id | sensor_id | avg | ip | ip1 | ip2 | ip3 +------+-----------+-----------+-----+----+-----+------------------+------------------ + 0 | 1 | 1 | 5 | 5 | 5 | 5 | 5 + 5 | 1 | 1 | | | 10 | 10 | 4.75 + 10 | 1 | 1 | | | 15 | 15 | 4.5 + 0 | 1 | 2 | | | 2.5 | 3.33333333333333 | 4.76190476190476 + 5 | 1 | 2 | 10 | 10 | 10 | 10 | 10 + 10 | 1 | 2 | | | 15 | 15 | 4.21052631578947 +(6 rows) + +-- test interpolate with correlated subquery and window function +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + interpolate( + min(value), + (SELECT (time,value) FROM :METRICS m2 + WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time DESC LIMIT 1), + (SELECT (time,value) FROM :METRICS m2 + WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time LIMIT 1) + ), + sum(interpolate( + min(value), + (SELECT (time,value) FROM :METRICS m2 + WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time DESC LIMIT 1), + (SELECT (time,value) FROM :METRICS m2 + WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time LIMIT 1) + )) OVER (PARTITION BY device_id, sensor_id ROWS 1 PRECEDING) +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 2,3,1; + time | device_id | sensor_id | interpolate | sum +------+-----------+-----------+------------------+------------------ + 0 | 1 | 1 | 5 | 5 + 5 | 1 | 1 | 4.75 | 9.75 + 10 | 1 | 1 | 4.5 | 9.25 + 0 | 1 | 2 | 4.76190476190476 | 4.76190476190476 + 5 | 1 | 2 | 10 | 14.7619047619048 + 10 | 1 | 2 | 4.21052631578947 | 14.2105263157895 +(6 rows) + +-- test subqueries +-- subqueries will alter the shape of the plan and top-level constraints +-- might not end up in top-level of jointree +SELECT + time_bucket_gapfill(1,m1.time) +FROM :METRICS m1 +WHERE m1.time >=0 AND m1.time < 2 AND device_id IN (SELECT device_id FROM :METRICS) +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test inner join with constraints in join condition +SELECT + 
time_bucket_gapfill(1,m2.time) +FROM :METRICS m1 INNER JOIN :METRICS m2 ON m1.time=m2.time AND m2.time >=0 AND m2.time < 2 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test actual table +SELECT + time_bucket_gapfill(1,time) +FROM :METRICS +WHERE time >=0 AND time < 2 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test with table alias +SELECT + time_bucket_gapfill(1,time) +FROM :METRICS m +WHERE m.time >=0 AND m.time < 2 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test with 2 tables +SELECT + time_bucket_gapfill(1,m.time) +FROM :METRICS m, :METRICS m2 +WHERE m.time >=0 AND m.time < 2 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 +(2 rows) + +-- test prepared statement with locf with lookup query +PREPARE prep_gapfill AS +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) +FROM :METRICS m1 +WHERE time >= 0 AND time < 5 +GROUP BY 1,2,3; +-- execute 10 times to make sure turning it into generic plan works +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | locf +------+-----------+-----------+------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 5 + 10 | 1 | 1 | 5 +(3 rows) + +DEALLOCATE prep_gapfill; +-- test prepared statement with interpolate with lookup query +PREPARE prep_gapfill AS +SELECT + time_bucket_gapfill(5,time,0,11) AS time, + device_id, + sensor_id, + interpolate( + min(value), + (SELECT (time,value) FROM :METRICS m2 + WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time DESC LIMIT 1), + (SELECT (time,value) FROM :METRICS m2 + WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id + ORDER BY time LIMIT 1) + ) +FROM :METRICS m1 +WHERE time >= 0 AND time < 10 +GROUP BY 1,2,3 ORDER BY 2,3,1; +-- execute 10 times to make sure turning it into generic plan works +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 
4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +EXECUTE prep_gapfill; + time | device_id | sensor_id | interpolate +------+-----------+-----------+------------------ + 0 | 1 | 1 | 5 + 5 | 1 | 1 | 4.75 + 10 | 1 | 1 | 4.5 + 0 | 1 | 2 | 4.76190476190476 + 5 | 1 | 2 | 10 + 10 | 1 | 2 | 4.21052631578947 +(6 rows) + +DEALLOCATE prep_gapfill; +-- test prepared statement with variable gapfill arguments +PREPARE prep_gapfill(int,int,int) AS +SELECT + time_bucket_gapfill($1,time,$2,$3) AS time, + device_id, + sensor_id, + min(value) +FROM :METRICS m1 +WHERE time >= $2 AND time < $3 AND device_id=1 AND sensor_id=1 +GROUP BY 1,2,3 ORDER BY 2,3,1; +-- execute 10 times to make sure turning it into generic plan works +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +EXECUTE 
prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +EXECUTE prep_gapfill(5,0,10); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 0 | 1 | 1 | 5 + 5 | 1 | 1 | +(2 rows) + +EXECUTE prep_gapfill(4,100,110); + time | device_id | sensor_id | min +------+-----------+-----------+----- + 100 | 1 | 1 | 0 + 104 | 1 | 1 | + 108 | 1 | 1 | +(3 rows) + +DEALLOCATE prep_gapfill; +-- Tests without tables +-- test locf and interpolate call without gapfill +SELECT locf(1); + locf + 1 +(1 row) + +SELECT interpolate(1); + interpolate + 1 +(1 row) + +-- test locf and interpolate call with NULL input +SELECT locf(NULL::int); + locf + +(1 row) + +SELECT interpolate(NULL::bigint); + interpolate + +(1 row) + +\set ON_ERROR_STOP 0 +-- test time_bucket_gapfill not top level function call +SELECT + 1 + time_bucket_gapfill(1,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: no top level time_bucket_gapfill in group by clause +-- test locf with treat_null_as_missing not BOOL +SELECT + time_bucket_gapfill(1,time,1,11), + locf(min(time),treat_null_as_missing:=1) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: function locf(integer, treat_null_as_missing => integer) does not exist +LINE 3: locf(min(time),treat_null_as_missing:=1) + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +-- test locf with treat_null_as_missing not literal +SELECT + time_bucket_gapfill(1,time,1,11), + locf(min(time),treat_null_as_missing:=random()>0) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid locf argument: treat_null_as_missing must be a BOOL literal +-- test interpolate lookup query with 1 element in record +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),next=>(SELECT ROW(10))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: interpolate RECORD arguments must have 2 elements +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),prev=>(SELECT ROW(10))) +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: interpolate RECORD arguments must have 2 elements +-- test interpolate lookup query with 3 elements in record +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),next=>(SELECT (10,10,10))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: interpolate RECORD arguments must have 2 elements +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),prev=>(SELECT (10,10,10))) +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: interpolate RECORD arguments must have 2 elements +-- test interpolate lookup query with mismatching time datatype +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),next=>(SELECT (10::float,10))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: first argument of interpolate returned record must match used timestamp datatype +DETAIL: Returned type double precision does not match expected type integer. 
+SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),prev=>(SELECT (10::float,10))) +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: first argument of interpolate returned record must match used timestamp datatype +DETAIL: Returned type double precision does not match expected type integer. +-- test interpolate lookup query with mismatching value datatype +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),next=>(SELECT (10,10::float))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: second argument of interpolate returned record must match used interpolate datatype +DETAIL: Returned type double precision does not match expected type integer. +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(min(time),prev=>(SELECT (10,10::float))) +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: second argument of interpolate returned record must match used interpolate datatype +DETAIL: Returned type double precision does not match expected type integer. +-- test interpolate with unsupported datatype +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(text 'text') +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: function interpolate(text) does not exist +LINE 3: interpolate(text 'text') + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(interval '1d') +FROM (VALUES (2),(3)) v(time) +GROUP BY 1; +ERROR: function interpolate(interval) does not exist +LINE 3: interpolate(interval '1d') + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +-- test multiple time_bucket_gapfill calls +SELECT + time_bucket_gapfill(1,time,1,11),time_bucket_gapfill(1,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple time_bucket_gapfill calls not allowed +-- test nested time_bucket_gapfill calls +SELECT + time_bucket_gapfill(1,time_bucket_gapfill(1,time,1,11),1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple time_bucket_gapfill calls not allowed +-- test nested locf calls +SELECT + time_bucket_gapfill(1,time,1,11), + locf(locf(min(time))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple interpolate/locf function calls per resultset column not supported +-- test nested interpolate calls +SELECT + time_bucket_gapfill(1,time,1,11), + interpolate(interpolate(min(time))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple interpolate/locf function calls per resultset column not supported +-- test mixed locf/interpolate calls +SELECT + time_bucket_gapfill(1,time,1,11), + locf(interpolate(min(time))) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple interpolate/locf function calls per resultset column not supported +-- test window function inside locf +SELECT + time_bucket_gapfill(1,time,1,11), + locf(avg(min(time)) OVER ()) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: window functions must not be below locf +-- test nested window functions +-- prevented by postgres +SELECT + time_bucket_gapfill(1,time,1,11), + avg(avg(min(time)) OVER ()) OVER () +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: window function calls cannot be nested +LINE 3: avg(avg(min(time)) OVER ()) OVER () + ^ +-- test multiple window functions in single column +SELECT + time_bucket_gapfill(1,time,1,11), + avg(min(time)) OVER () + avg(min(time)) OVER () +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: multiple window function calls per column not supported +-- 
test locf not toplevel +SELECT + time_bucket_gapfill(1,time,1,11), + 1 + locf(min(time)) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: locf must be toplevel function call +-- test locf inside aggregate +SELECT + time_bucket_gapfill(1,time,1,11), + min(min(locf(time))) OVER () +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: aggregate functions must be below locf +-- test NULL args +SELECT + time_bucket_gapfill(NULL,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width cannot be NULL +SELECT + time_bucket_gapfill(1,NULL,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts cannot be NULL +SELECT + time_bucket_gapfill(1,time,NULL,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +SELECT + time_bucket_gapfill(1,time,1,NULL) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer finish from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +SELECT + time_bucket_gapfill(NULL,time,'Europe/Berlin','2000-06-01','2001-06-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2001-01-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width cannot be NULL +SELECT + time_bucket_gapfill('1day',NULL,'Europe/Berlin','2000-06-01','2001-06-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2001-01-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts cannot be NULL +SELECT + time_bucket_gapfill('1day',time,NULL,'2000-06-01','2001-06-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2001-01-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: timezone cannot be NULL +-- test 0 bucket_width +SELECT + time_bucket_gapfill(0,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +SELECT + time_bucket_gapfill('0d',time,'2000-01-01','2000-02-01') +FROM (VALUES ('2000-01-01'::date),('2000-02-01'::date)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +SELECT + time_bucket_gapfill('0d',time,'2000-01-01','2000-02-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2000-02-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +-- test negative bucket_width +SELECT + time_bucket_gapfill(-1,time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +SELECT + time_bucket_gapfill('-1d',time,'2000-01-01','2000-02-01') +FROM (VALUES ('2000-01-01'::date),('2000-02-01'::date)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +SELECT + time_bucket_gapfill('-1d',time,'2000-01-01','2000-02-01') +FROM (VALUES ('2000-01-01'::timestamptz),('2000-02-01'::timestamptz)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be greater than 0 +-- test subqueries as interval, start and stop (not supported atm) +SELECT + time_bucket_gapfill((SELECT 1),time,1,11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be a simple expression +SELECT + time_bucket_gapfill(1,time,(SELECT 
1),11) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start must be a simple expression +SELECT + time_bucket_gapfill(1,time,1,(SELECT 11)) +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: finish must be a simple expression +\set ON_ERROR_STOP 1 +-- test time_bucket_gapfill without aggregation +-- this will not trigger gapfilling +SELECT + time_bucket_gapfill(1,time,1,11) +FROM (VALUES (1),(2)) v(time); + time_bucket_gapfill + 1 + 2 +(2 rows) + +SELECT + time_bucket_gapfill(1,time,1,11), + avg(time) OVER () +FROM (VALUES (1),(2)) v(time); + time_bucket_gapfill | avg +---------------------+-------------------- + 1 | 1.5000000000000000 + 2 | 1.5000000000000000 +(2 rows) + +-- test int int2/4/8 +SELECT + time_bucket_gapfill(1::int2,time::int2,0::int2,6::int2) +FROM (VALUES (1),(4)) v(time) +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 + 3 + 4 + 5 +(6 rows) + +SELECT + time_bucket_gapfill(1::int4,time::int4,0::int4,6::int4) +FROM (VALUES (1),(4)) v(time) +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 + 3 + 4 + 5 +(6 rows) + +SELECT + time_bucket_gapfill(1::int8,time::int8,0::int8,6::int8) +FROM (VALUES (1),(4)) v(time) +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 + 3 + 4 + 5 +(6 rows) + +-- test non-aligned bucket start +SELECT + time_bucket_gapfill(10,time,5,40) +FROM (VALUES (11),(22)) v(time) +GROUP BY 1; + time_bucket_gapfill + 0 + 10 + 20 + 30 +(4 rows) + +-- simple gapfill query +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + min(value) AS value +FROM (values (-10,1),(10,2),(11,3),(12,4),(22,5),(30,6),(66,7)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + -10 | 1 + 0 | + 10 | 2 + 20 | 5 + 30 | 6 + 40 | + 60 | 7 +(7 rows) + +-- test references to different columns +SELECT + time_bucket_gapfill(1,t,0,5) as t, + min(t),max(t),min(v),max(v) +FROM(VALUES (1,3),(2,5)) tb(t,v) +GROUP BY 1 ORDER BY 1; + t | min | max | min | max +---+-----+-----+-----+----- + 0 | | | | + 1 | 1 | 1 | 3 | 3 + 2 | 2 | 2 | 5 | 5 + 3 | | | | + 4 | | | | +(5 rows) + +-- test passing of values outside boundaries +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) +FROM (VALUES (-1),(1),(3),(6)) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +---------------------+----- + -1 | -1 + 0 | + 1 | 1 + 2 | + 3 | 3 + 4 | + 6 | 6 +(7 rows) + +-- test gap fill before first row and after last row +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) +FROM (VALUES (1),(2),(3)) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +---------------------+----- + 0 | + 1 | 1 + 2 | 2 + 3 | 3 + 4 | +(5 rows) + +-- test gap fill without rows in resultset +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) +FROM (VALUES (1),(2),(3)) v(time) +WHERE false +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +---------------------+----- + 0 | + 1 | + 2 | + 3 | + 4 | +(5 rows) + +-- test coalesce +SELECT + time_bucket_gapfill(1,time,0,5), + coalesce(min(time),0), + coalesce(min(value),0), + coalesce(min(value),7) +FROM (VALUES (1,1),(2,2),(3,3)) v(time,value) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | coalesce | coalesce | coalesce +---------------------+----------+----------+---------- + 0 | 0 | 0 | 7 + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 + 4 | 0 | 0 | 7 +(5 rows) + +-- test case +SELECT + time_bucket_gapfill(1,time,0,5), + min(time), + CASE WHEN min(time) IS NOT NULL THEN min(time) ELSE -1 END, + CASE WHEN min(time) IS NOT NULL THEN min(time) + 7 ELSE 0 END, + CASE WHEN 1 = 1 THEN 1 
ELSE 0 END +FROM (VALUES (1,1),(2,2),(3,3)) v(time,value) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min | case | case | case +---------------------+-----+------+------+------ + 0 | | -1 | 0 | 1 + 1 | 1 | 1 | 8 | 1 + 2 | 2 | 2 | 9 | 1 + 3 | 3 | 3 | 10 | 1 + 4 | | -1 | 0 | 1 +(5 rows) + +-- test constants +SELECT + time_bucket_gapfill(1,time,0,5), + min(time), min(time), 4 as c +FROM (VALUES (1),(2),(3)) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min | min | c +---------------------+-----+-----+--- + 0 | | | 4 + 1 | 1 | 1 | 4 + 2 | 2 | 2 | 4 + 3 | 3 | 3 | 4 + 4 | | | 4 +(5 rows) + +-- test column reordering +SELECT + 1 as c1, '2' as c2, + time_bucket_gapfill(1,time,0,5), + 3.0 as c3, + min(time), min(time), 4 as c4 +FROM (VALUES (1),(2),(3)) v(time) +GROUP BY 3 ORDER BY 3; + c1 | c2 | time_bucket_gapfill | c3 | min | min | c4 +----+----+---------------------+-----+-----+-----+---- + 1 | 2 | 0 | 3.0 | | | 4 + 1 | 2 | 1 | 3.0 | 1 | 1 | 4 + 1 | 2 | 2 | 3.0 | 2 | 2 | 4 + 1 | 2 | 3 | 3.0 | 3 | 3 | 4 + 1 | 2 | 4 | 3.0 | | | 4 +(5 rows) + +-- test timestamptz +SELECT + time_bucket_gapfill(INTERVAL '6h',time,TIMESTAMPTZ '2000-01-01',TIMESTAMPTZ '2000-01-02'), + min(time) +FROM (VALUES (TIMESTAMPTZ '2000-01-01 9:00:00'),(TIMESTAMPTZ '2000-01-01 18:00:00')) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +------------------------------+------------------------------ + Fri Dec 31 22:00:00 1999 PST | + Sat Jan 01 04:00:00 2000 PST | Sat Jan 01 09:00:00 2000 PST + Sat Jan 01 10:00:00 2000 PST | + Sat Jan 01 16:00:00 2000 PST | Sat Jan 01 18:00:00 2000 PST + Sat Jan 01 22:00:00 2000 PST | +(5 rows) + +-- test timestamp +SELECT + time_bucket_gapfill(INTERVAL '6h',time,TIMESTAMP '2000-01-01',TIMESTAMP '2000-01-02'), + min(time) +FROM (VALUES (TIMESTAMP '2000-01-01 9:00:00'),(TIMESTAMP '2000-01-01 18:00:00')) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +--------------------------+-------------------------- + Sat Jan 01 00:00:00 2000 | + Sat Jan 01 06:00:00 2000 | Sat Jan 01 09:00:00 2000 + Sat Jan 01 12:00:00 2000 | + Sat Jan 01 18:00:00 2000 | Sat Jan 01 18:00:00 2000 +(4 rows) + +-- test date +SELECT + time_bucket_gapfill(INTERVAL '1w',time,DATE '2000-01-01',DATE '2000-02-10'), + min(time) +FROM (VALUES (DATE '2000-01-08'),(DATE '2000-01-22')) v(time) +GROUP BY 1 ORDER BY 1; + time_bucket_gapfill | min +---------------------+------------ + 12-27-1999 | + 01-03-2000 | 01-08-2000 + 01-10-2000 | + 01-17-2000 | 01-22-2000 + 01-24-2000 | + 01-31-2000 | + 02-07-2000 | +(7 rows) + +-- test grouping by non-time columns +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m +FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +GROUP BY 1,id ORDER BY 2,1; + time | id | m +------+----+--- + 0 | 1 | + 1 | 1 | 1 + 2 | 1 | + 3 | 1 | + 4 | 1 | + 0 | 2 | + 1 | 2 | + 2 | 2 | 2 + 3 | 2 | + 4 | 2 | +(10 rows) + +-- test grouping by non-time columns with no rows in resultset +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m +FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +WHERE false +GROUP BY 1,id ORDER BY 2,1; + time | id | m +------+----+--- +(0 rows) + +-- test duplicate columns in GROUP BY +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + id, + min(value) as m +FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +GROUP BY 1,2,3 ORDER BY 2,1; + time | id | id | m +------+----+----+--- + 0 | 1 | 1 | + 1 | 1 | 1 | 1 + 2 | 1 | 1 | + 3 | 1 | 1 | + 4 | 1 | 1 | + 0 | 2 | 2 | + 1 | 2 | 2 | + 2 | 2 | 2 | 2 + 3 | 2 | 2 | + 4 | 2 | 2 | +(10 
rows) + +-- test grouping by columns not in resultset +SELECT + time_bucket_gapfill(1,time,0,5) as time, + min(value) as m +FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +GROUP BY 1,id ORDER BY id,1; + time | m +------+--- + 0 | + 1 | 1 + 2 | + 3 | + 4 | + 0 | + 1 | + 2 | 2 + 3 | + 4 | +(10 rows) + +-- test grouping by non-time columns with text columns +SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + min(value) as m +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color ORDER BY 2,1; + time | color | m +------+-------+--- + 0 | blue | + 1 | blue | 1 + 2 | blue | + 3 | blue | + 4 | blue | + 0 | red | + 1 | red | + 2 | red | 2 + 3 | red | + 4 | red | +(10 rows) + +-- test grouping by non-time columns with text columns with no rows in resultset +SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + min(value) as m +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +WHERE false +GROUP BY 1,color ORDER BY 2,1; + time | color | m +------+-------+--- +(0 rows) + +--- test insert into SELECT +CREATE TABLE gapfill_insert_test(id INT); +INSERT INTO gapfill_insert_test SELECT time_bucket_gapfill(1,time,1,5) FROM (VALUES (1),(2)) v(time) GROUP BY 1 ORDER BY 1; +SELECT * FROM gapfill_insert_test; + id + 1 + 2 + 3 + 4 +(4 rows) + +-- test join +SELECT t1.*,t2.m FROM +( + SELECT + time_bucket_gapfill(1,time,0,5) as time, color, min(value) as m + FROM + (VALUES (1,'red',1),(2,'blue',2)) v(time,color,value) + GROUP BY 1,color ORDER BY 2,1 +) t1 INNER JOIN +( + SELECT + time_bucket_gapfill(1,time,0,5) as time, color, min(value) as m + FROM + (VALUES (3,'red',1),(4,'blue',2)) v(time,color,value) + GROUP BY 1,color ORDER BY 2,1 +) t2 ON t1.time = t2.time AND t1.color=t2.color; + time | color | m | m +------+-------+---+--- + 0 | blue | | + 1 | blue | | + 2 | blue | 2 | + 3 | blue | | + 4 | blue | | 2 + 0 | red | | + 1 | red | 1 | + 2 | red | | + 3 | red | | 1 + 4 | red | | +(10 rows) + +-- test join with locf +SELECT t1.*,t2.m FROM +( + SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + locf(min(value)) as locf + FROM + (VALUES (0,'red',1),(0,'blue',2)) v(time,color,value) + GROUP BY 1,color ORDER BY 2,1 +) t1 INNER JOIN +( + SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + locf(min(value)) as m + FROM + (VALUES (3,'red',1),(4,'blue',2)) v(time,color,value) + GROUP BY 1,color ORDER BY 2,1 +) t2 ON t1.time = t2.time AND t1.color=t2.color; + time | color | locf | m +------+-------+------+--- + 0 | blue | 2 | + 1 | blue | 2 | + 2 | blue | 2 | + 3 | blue | 2 | + 4 | blue | 2 | 2 + 0 | red | 1 | + 1 | red | 1 | + 2 | red | 1 | + 3 | red | 1 | 1 + 4 | red | 1 | 1 +(10 rows) + +-- test locf +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value)) AS value +FROM (values (10,9),(20,3),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | 3 + 40 | 3 + 50 | 6 +(6 rows) + +-- test locf with NULLs in resultset +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value)) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | + 40 | + 50 | 6 +(6 rows) + +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=false) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | + 40 | + 50 | 6 +(6 rows) + +SELECT + 
time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=NULL) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | + 40 | + 50 | 6 +(6 rows) + +-- test locf with NULLs in resultset and treat_null_as_missing +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=true) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | + 10 | 9 + 20 | 3 + 30 | 3 + 40 | 3 + 50 | 6 +(6 rows) + +-- test locf with NULLs in first row of resultset and treat_null_as_missing with lookup query +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=false, prev := (SELECT 100)) AS v1, + locf(min(value),treat_null_as_missing:=true, prev := (SELECT 100)) AS v2 +FROM (values (0,NULL),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | v1 | v2 +------+----+----- + 0 | | 100 + 10 | | 100 + 20 | | 100 + 30 | | 100 + 40 | | 100 + 50 | 6 | 6 +(6 rows) + +-- test locf with NULLs in resultset and treat_null_as_missing with resort +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + locf(min(value),treat_null_as_missing:=true) AS value +FROM (values (10,9),(20,3),(30,NULL),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1 DESC; + time | value +------+------- + 50 | 6 + 40 | 3 + 30 | 3 + 20 | 3 + 10 | 9 + 0 | +(6 rows) + +-- test locf with constants +SELECT + time_bucket_gapfill(1,time,0,5), + 2, + locf(min(value)) +FROM (VALUES (0,1,3),(4,2,3)) v(time,value) +GROUP BY 1; + time_bucket_gapfill | ?column? | locf +---------------------+----------+------ + 0 | 2 | 1 + 1 | 2 | 1 + 2 | 2 | 1 + 3 | 2 | 1 + 4 | 2 | 2 +(5 rows) + +-- test expressions inside locf +SELECT + time_bucket_gapfill(1,time,0,5), + locf(min(value)), + locf(4), + locf(4 + min(value)) +FROM (VALUES (0,1,3),(4,2,3)) v(time,value) +GROUP BY 1; + time_bucket_gapfill | locf | locf | locf +---------------------+------+------+------ + 0 | 1 | 4 | 5 + 1 | 1 | 4 | 5 + 2 | 1 | 4 | 5 + 3 | 1 | 4 | 5 + 4 | 2 | 4 | 6 +(5 rows) + +-- test locf with out of boundary lookup +SELECT + time_bucket_gapfill(10,time,0,70) AS time, + locf(min(value),(SELECT 100)) AS value +FROM (values (20,9),(40,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | 100 + 10 | 100 + 20 | 9 + 30 | 9 + 40 | 6 + 50 | 6 + 60 | 6 +(7 rows) + +-- test locf with different datatypes +SELECT + time_bucket_gapfill(1,time,0,5) as time, + locf(min(v1)) AS text, + locf(min(v2)) AS "int[]", + locf(min(v3)) AS "text 4/8k" +FROM (VALUES + (1,'foo',ARRAY[1,2,3],repeat('4k',2048)), + (3,'bar',ARRAY[3,4,5],repeat('8k',4096)) +) v(time,v1,v2,v3) +GROUP BY 1; + time | text | int[] | text 4/8k 
+------+------+---------+------------------------------------------------------------------
+    0 |      |         | 
+    1 | foo  | {1,2,3} | 4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k...
+    2 | foo  | {1,2,3} | 4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k...
+    3 | bar  | {3,4,5} | 8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k...
+    4 | bar  | {3,4,5} | 8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k...
+(5 rows)
+
+-- test locf with different datatypes and treat_null_as_missing
+SELECT
+  time_bucket_gapfill(1,time,0,5) as time,
+  locf(min(v1),treat_null_as_missing:=true) AS text,
+  locf(min(v2),treat_null_as_missing:=true) AS "int[]",
+  locf(min(v3),treat_null_as_missing:=true) AS "text 4/8k"
+FROM (VALUES
+  (1,'foo',ARRAY[1,2,3],repeat('4k',2048)),
+  (2,NULL,NULL,NULL),
+  (3,'bar',ARRAY[3,4,5],repeat('8k',4096))
+) v(time,v1,v2,v3)
+GROUP BY 1;
+ time | text | int[]   | text 4/8k
+------+------+---------+------------------------------------------------------------------
+    0 |      |         | 
+    1 | foo  | {1,2,3} | 4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k...
+    2 | foo  | {1,2,3} | 4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k4k...
+    3 | bar  | {3,4,5} | 8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k...
+    4 | bar  | {3,4,5} | 8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k
k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k8k +(5 rows) + +-- test interpolate +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + interpolate(min(value)) AS value +FROM (values (0,1),(50,6)) v(time,value) +GROUP BY 1 ORDER BY 1; + time | value +------+------- + 0 | 1 + 10 | 2 + 20 | 3 + 30 | 4 + 40 | 5 + 50 | 6 +(6 rows) + +-- test interpolate with NULL values +SELECT + time_bucket_gapfill(1,time,0,5) AS time, + interpolate(avg(temp)) AS temp +FROM (VALUES (0,0),(2,NULL),(5,5)) v(time,temp) +GROUP BY 1; + time | temp +------+------ + 0 | 0 + 1 | + 2 | + 3 | + 4 | + 5 | 5 +(6 rows) + +-- test interpolate datatypes +SELECT + time_bucket_gapfill(10,time,0,50) AS time, + interpolate(min(v1)) AS "smallint", + interpolate(min(v2)) AS "int", + interpolate(min(v3)) AS "bigint", + interpolate(min(v4)) AS "float4", + interpolate(min(v5)) AS "float8" +FROM (values (0,-3::smallint,-3::int,-3::bigint,-3::float4,-3::float8),(50,3::smallint,3::int,3::bigint,3::float4,3::float8)) v(time,v1,v2,v3,v4,v5) +GROUP BY 1 ORDER BY 1; + time | smallint | int | bigint | float4 | float8 +------+----------+-----+--------+--------+-------- + 0 | -3 | -3 | -3 | -3 | -3 + 10 | -2 | -2 | -2 | -1.8 | -1.8 + 20 | -1 | -1 | -1 | -0.6 | -0.6 + 30 | 1 | 1 | 1 | 0.6 | 0.6 + 40 | 2 | 2 | 2 | 1.8 | 1.8 + 50 | 3 | 3 | 3 | 3 | 3 +(6 rows) + +-- test interpolate datatypes with negative time +SELECT + 
time_bucket_gapfill(10,time,-40,30) AS time, + interpolate(min(v1)) AS "smallint", + interpolate(min(v2)) AS "int", + interpolate(min(v3)) AS "bigint", + interpolate(min(v4)) AS "float4", + interpolate(min(v5)) AS "float8" +FROM (values (-40,-3::smallint,-3::int,-3::bigint,-3::float4,-3::float8),(20,3::smallint,3::int,3::bigint,3::float4,3::float8)) v(time,v1,v2,v3,v4,v5) +GROUP BY 1 ORDER BY 1; + time | smallint | int | bigint | float4 | float8 +------+----------+-----+--------+--------+-------- + -40 | -3 | -3 | -3 | -3 | -3 + -30 | -2 | -2 | -2 | -2 | -2 + -20 | -1 | -1 | -1 | -1 | -1 + -10 | 0 | 0 | 0 | 0 | 0 + 0 | 1 | 1 | 1 | 1 | 1 + 10 | 2 | 2 | 2 | 2 | 2 + 20 | 3 | 3 | 3 | 3 | 3 +(7 rows) + +-- test interpolate with multiple groupings +SELECT + time_bucket_gapfill(5,time,0,11), + device, + interpolate(min(v1),(SELECT (-10,-10)),(SELECT (20,10))) +FROM (VALUES (5,1,0),(5,2,0)) as v(time,device,v1) +GROUP BY 1,2 ORDER BY 2,1; + time_bucket_gapfill | device | interpolate +---------------------+--------+------------- + 0 | 1 | -3 + 5 | 1 | 0 + 10 | 1 | 3 + 0 | 2 | -3 + 5 | 2 | 0 + 10 | 2 | 3 +(6 rows) + +-- test cte with gap filling in outer query +WITH data AS ( + SELECT * FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) +) +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m +FROM data +GROUP BY 1,id; + time | id | m +------+----+--- + 0 | 1 | + 1 | 1 | 1 + 2 | 1 | + 3 | 1 | + 4 | 1 | + 0 | 2 | + 1 | 2 | + 2 | 2 | 2 + 3 | 2 | + 4 | 2 | +(10 rows) + +-- test cte with gap filling in inner query +WITH gapfill AS ( + SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m + FROM (VALUES (1,1,1),(2,2,2)) v(time,id,value) + GROUP BY 1,id +) +SELECT * FROM gapfill; + time | id | m +------+----+--- + 0 | 1 | + 1 | 1 | 1 + 2 | 1 | + 3 | 1 | + 4 | 1 | + 0 | 2 | + 1 | 2 | + 2 | 2 | 2 + 3 | 2 | + 4 | 2 | +(10 rows) + +-- test window functions +SELECT + time_bucket_gapfill(10,time,0,60), + interpolate(min(time)), + lag(min(time)) OVER () +FROM (VALUES (0),(50)) v(time) +GROUP BY 1; + time_bucket_gapfill | interpolate | lag +---------------------+-------------+----- + 0 | 0 | + 10 | 10 | 0 + 20 | 20 | + 30 | 30 | + 40 | 40 | + 50 | 50 | +(6 rows) + +-- test window functions with multiple windows +SELECT + time_bucket_gapfill(1,time,0,10), + interpolate(min(time)), + row_number() OVER (), + locf(min(time)), + sum(interpolate(min(time))) OVER (ROWS 1 PRECEDING), + sum(interpolate(min(time))) OVER (ROWS 2 PRECEDING), + sum(interpolate(min(time))) OVER (ROWS 3 PRECEDING), + sum(interpolate(min(time))) OVER (ROWS 4 PRECEDING) +FROM (VALUES (0),(9)) v(time) +GROUP BY 1; + time_bucket_gapfill | interpolate | row_number | locf | sum | sum | sum | sum +---------------------+-------------+------------+------+-----+-----+-----+----- + 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 + 1 | 1 | 2 | 0 | 1 | 1 | 1 | 1 + 2 | 2 | 3 | 0 | 3 | 3 | 3 | 3 + 3 | 3 | 4 | 0 | 5 | 6 | 6 | 6 + 4 | 4 | 5 | 0 | 7 | 9 | 10 | 10 + 5 | 5 | 6 | 0 | 9 | 12 | 14 | 15 + 6 | 6 | 7 | 0 | 11 | 15 | 18 | 20 + 7 | 7 | 8 | 0 | 13 | 18 | 22 | 25 + 8 | 8 | 9 | 0 | 15 | 21 | 26 | 30 + 9 | 9 | 10 | 9 | 17 | 24 | 30 | 35 +(10 rows) + +-- test window functions with constants +SELECT + time_bucket_gapfill(1,time,0,5), + min(time), + 4 as c, + lag(min(time)) OVER () +FROM (VALUES (1),(2),(3)) v(time) +GROUP BY 1; + time_bucket_gapfill | min | c | lag +---------------------+-----+---+----- + 0 | | 4 | + 1 | 1 | 4 | + 2 | 2 | 4 | 1 + 3 | 3 | 4 | 2 + 4 | | 4 | 3 +(5 rows) + +--test window functions with locf +SELECT + 
time_bucket_gapfill(1,time,0,5), + min(time) AS "min", + lag(min(time)) over () AS lag_min, + lead(min(time)) over () AS lead_min, + locf(min(time)) AS locf, + lag(locf(min(time))) over () AS lag_locf, + lead(locf(min(time))) over () AS lead_locf +FROM (VALUES (1),(2)) v(time) +GROUP BY 1; + time_bucket_gapfill | min | lag_min | lead_min | locf | lag_locf | lead_locf +---------------------+-----+---------+----------+------+----------+----------- + 0 | | | 1 | | | 1 + 1 | 1 | | 2 | 1 | | 2 + 2 | 2 | 1 | | 2 | 1 | 2 + 3 | | 2 | | 2 | 2 | 2 + 4 | | | | 2 | 2 | +(5 rows) + +--test window functions with interpolate +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) AS "min", + lag(min(time)) over () AS lag_min, + lead(min(time)) over () AS lead_min, + interpolate(min(time)) AS interpolate, + lag(interpolate(min(time))) over () AS lag_interpolate, + lead(interpolate(min(time))) over () AS lead_interpolate +FROM (VALUES (1),(3)) v(time) +GROUP BY 1; + time_bucket_gapfill | min | lag_min | lead_min | interpolate | lag_interpolate | lead_interpolate +---------------------+-----+---------+----------+-------------+-----------------+------------------ + 0 | | | 1 | | | 1 + 1 | 1 | | | 1 | | 2 + 2 | | 1 | 3 | 2 | 1 | 3 + 3 | 3 | | | 3 | 2 | + 4 | | 3 | | | 3 | +(5 rows) + +--test window functions with expressions +SELECT + time_bucket_gapfill(1,time,0,5), + min(time) AS "min", + lag(min(time)) over () AS lag_min, + 1 + lag(min(time)) over () AS lag_min, + interpolate(min(time)) AS interpolate, + lag(interpolate(min(time))) over () AS lag_interpolate, + 1 + lag(interpolate(min(time))) over () AS lag_interpolate +FROM (VALUES (1),(3)) v(time) +GROUP BY 1; + time_bucket_gapfill | min | lag_min | lag_min | interpolate | lag_interpolate | lag_interpolate +---------------------+-----+---------+---------+-------------+-----------------+----------------- + 0 | | | | | | + 1 | 1 | | | 1 | | + 2 | | 1 | 2 | 2 | 1 | 2 + 3 | 3 | | | 3 | 2 | 3 + 4 | | 3 | 4 | | 3 | 4 +(5 rows) + +--test row_number/rank/percent_rank/... 
window functions with gapfill reference +SELECT + time_bucket_gapfill(1,time,0,5), + ntile(2) OVER () AS ntile_2, + ntile(3) OVER () AS ntile_3, + ntile(5) OVER () AS ntile_5, + row_number() OVER (), + cume_dist() OVER (ORDER BY time_bucket_gapfill(1,time,0,5)), + rank() OVER (), + rank() OVER (ORDER BY time_bucket_gapfill(1,time,0,5)), + percent_rank() OVER (ORDER BY time_bucket_gapfill(1,time,0,5)) +FROM (VALUES (1),(3)) v(time) +GROUP BY 1; + time_bucket_gapfill | ntile_2 | ntile_3 | ntile_5 | row_number | cume_dist | rank | rank | percent_rank +---------------------+---------+---------+---------+------------+-----------+------+------+-------------- + 0 | 1 | 1 | 1 | 1 | 0.2 | 1 | 1 | 0 + 1 | 1 | 1 | 2 | 2 | 0.4 | 1 | 2 | 0.25 + 2 | 1 | 2 | 3 | 3 | 0.6 | 1 | 3 | 0.5 + 3 | 2 | 2 | 4 | 4 | 0.8 | 1 | 4 | 0.75 + 4 | 2 | 3 | 5 | 5 | 1 | 1 | 5 | 1 +(5 rows) + +-- test first_value/last_value/nth_value +SELECT + time_bucket_gapfill(1,time,0,5), + first_value(min(time)) OVER (), + nth_value(min(time),3) OVER (), + last_value(min(time)) OVER () +FROM (VALUES (0),(2),(5)) v(time) +GROUP BY 1; + time_bucket_gapfill | first_value | nth_value | last_value +---------------------+-------------+-----------+------------ + 0 | 0 | 2 | 5 + 1 | 0 | 2 | 5 + 2 | 0 | 2 | 5 + 3 | 0 | 2 | 5 + 4 | 0 | 2 | 5 + 5 | 0 | 2 | 5 +(6 rows) + +-- test window functions with PARTITION BY +SELECT + time_bucket_gapfill(1,time,0,5) as time, + color, + row_number() OVER (), + row_number() OVER (PARTITION BY color) +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color ORDER BY 2,1; + time | color | row_number | row_number +------+-------+------------+------------ + 0 | blue | 1 | 1 + 1 | blue | 2 | 2 + 2 | blue | 3 | 3 + 3 | blue | 4 | 4 + 4 | blue | 5 | 5 + 0 | red | 6 | 1 + 1 | red | 7 | 2 + 2 | red | 8 | 3 + 3 | red | 9 | 4 + 4 | red | 10 | 5 +(10 rows) + +-- test multiple windows +\set ON_ERROR_STOP 0 +SELECT + time_bucket_gapfill(1,time,0,11), + first_value(interpolate(min(time))) OVER (ROWS 1 PRECEDING), + interpolate(min(time)), + last_value(interpolate(min(time))) OVER (ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +FROM (VALUES (0),(10)) v(time) +GROUP BY 1; + time_bucket_gapfill | first_value | interpolate | last_value +---------------------+-------------+-------------+------------ + 0 | 0 | 0 | 1 + 1 | 0 | 1 | 2 + 2 | 1 | 2 | 3 + 3 | 2 | 3 | 4 + 4 | 3 | 4 | 5 + 5 | 4 | 5 | 6 + 6 | 5 | 6 | 7 + 7 | 6 | 7 | 8 + 8 | 7 | 8 | 9 + 9 | 8 | 9 | 10 + 10 | 9 | 10 | 10 +(11 rows) + +-- test reorder +SELECT + time_bucket_gapfill(1,time,0,5) as time, + id, + min(value) as m +FROM + (VALUES (1,1,1),(2,2,2)) v(time,id,value) +GROUP BY 1,id ORDER BY 1,id; + time | id | m +------+----+--- + 0 | 1 | + 0 | 2 | + 1 | 1 | 1 + 1 | 2 | + 2 | 1 | + 2 | 2 | 2 + 3 | 1 | + 3 | 2 | + 4 | 1 | + 4 | 2 | +(10 rows) + +-- test order by locf +SELECT + time_bucket_gapfill(1,time,1,6), + locf(min(time)) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 1,2; + time_bucket_gapfill | locf +---------------------+------ + 1 | + 2 | 2 + 3 | 3 + 4 | 3 + 5 | 3 +(5 rows) + +SELECT + time_bucket_gapfill(1,time,1,6), + locf(min(time)) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 2 NULLS FIRST,1; + time_bucket_gapfill | locf +---------------------+------ + 1 | + 2 | 2 + 3 | 3 + 4 | 3 + 5 | 3 +(5 rows) + +SELECT + time_bucket_gapfill(1,time,1,6), + locf(min(time)) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 2 NULLS LAST,1; + time_bucket_gapfill | locf +---------------------+------ + 2 | 2 + 3 | 3 + 4 | 3 + 5 | 3 + 1 | +(5 
rows) + +-- test order by interpolate +SELECT + time_bucket_gapfill(1,time,1,6), + interpolate(min(time),prev:=(0,0)::record) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 1,2; + time_bucket_gapfill | interpolate +---------------------+------------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | + 5 | +(5 rows) + +SELECT + time_bucket_gapfill(1,time,1,6), + interpolate(min(time),prev:=(0,0)::record) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 2 NULLS FIRST,1; + time_bucket_gapfill | interpolate +---------------------+------------- + 4 | + 5 | + 1 | 1 + 2 | 2 + 3 | 3 +(5 rows) + +SELECT + time_bucket_gapfill(1,time,1,6), + interpolate(min(time),prev:=(0,0)::record) +FROM + (VALUES (2),(3)) v(time) +GROUP BY 1 ORDER BY 2 NULLS LAST,1; + time_bucket_gapfill | interpolate +---------------------+------------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | + 5 | +(5 rows) + +-- test queries on hypertable +-- test locf and interpolate together +SELECT + time_bucket_gapfill(interval '1h',time,timestamptz '2018-01-01 05:00:00-8', timestamptz '2018-01-01 07:00:00-8'), + device_id, + locf(avg(v1)) AS locf_v1, + locf(min(v2)) AS locf_v2, + interpolate(avg(v1)) AS interpolate_v1, + interpolate(avg(v2)) AS interpolate_v2 +FROM metrics_tstz +GROUP BY 1,2 +ORDER BY 1,2; + time_bucket_gapfill | device_id | locf_v1 | locf_v2 | interpolate_v1 | interpolate_v2 +------------------------------+-----------+---------+---------+----------------+---------------- + Mon Jan 01 05:00:00 2018 PST | 1 | 0.5 | 10 | 0.5 | 10 + Mon Jan 01 05:00:00 2018 PST | 2 | 0.7 | 20 | 0.7 | 20 + Mon Jan 01 05:00:00 2018 PST | 3 | 0.9 | 30 | 0.9 | 30 + Mon Jan 01 06:00:00 2018 PST | 1 | 0.5 | 10 | 0.25 | 5 + Mon Jan 01 06:00:00 2018 PST | 2 | 0.7 | 20 | 1.05 | 30 + Mon Jan 01 06:00:00 2018 PST | 3 | 0.9 | 30 | 0.9 | 30 + Mon Jan 01 07:00:00 2018 PST | 1 | 0 | 0 | 0 | 0 + Mon Jan 01 07:00:00 2018 PST | 2 | 1.4 | 40 | 1.4 | 40 + Mon Jan 01 07:00:00 2018 PST | 3 | 0.9 | 30 | 0.9 | 30 +(9 rows) + +SELECT + time_bucket_gapfill('12h'::interval,time,'2017-01-01'::timestamptz, '2017-01-02'::timestamptz), + interpolate( + avg(v1), + (SELECT ('2017-01-01'::timestamptz,1::float)), + (SELECT ('2017-01-02'::timestamptz,2::float)) + ) +FROM metrics_tstz WHERE time < '2017-01-01' GROUP BY 1; + time_bucket_gapfill | interpolate +------------------------------+------------------- + Sat Dec 31 16:00:00 2016 PST | 0.666666666666667 + Sun Jan 01 04:00:00 2017 PST | 1.16666666666667 + Sun Jan 01 16:00:00 2017 PST | 1.66666666666667 +(3 rows) + +SELECT + time_bucket_gapfill('12h'::interval,time,'2017-01-01'::timestamptz, '2017-01-02'::timestamptz), + interpolate( + avg(v1), + (SELECT ('2017-01-01'::timestamptz,1::float)), + (SELECT ('2017-01-02'::timestamptz,2::float)) + ) +FROM metrics_tstz WHERE time_bucket_gapfill('12h'::interval,time,'2017-01-01'::timestamptz, '2017-01-02'::timestamptz) < '2017-01-01' GROUP BY 1; + time_bucket_gapfill | interpolate +------------------------------+------------------- + Sat Dec 31 16:00:00 2016 PST | 0.666666666666667 + Sun Jan 01 04:00:00 2017 PST | 1.16666666666667 + Sun Jan 01 16:00:00 2017 PST | 1.66666666666667 +(3 rows) + +-- interpolation with correlated subquery lookup before interval +SELECT + time_bucket_gapfill('1h'::interval,time,'2018-01-01 3:00 PST'::timestamptz, '2018-01-01 8:00 PST'::timestamptz), + device_id, + interpolate( + avg(v1), + (SELECT (time,0.5::float) FROM metrics_tstz m2 WHERE m1.device_id=m2.device_id ORDER BY time DESC LIMIT 1) + ), + avg(v1) +FROM metrics_tstz m1 +WHERE device_id=1 GROUP BY 1,2 ORDER BY 
1,2; + time_bucket_gapfill | device_id | interpolate | avg +------------------------------+-----------+-------------+----- + Mon Jan 01 03:00:00 2018 PST | 1 | 0.5 | + Mon Jan 01 04:00:00 2018 PST | 1 | 0.5 | + Mon Jan 01 05:00:00 2018 PST | 1 | 0.5 | 0.5 + Mon Jan 01 06:00:00 2018 PST | 1 | 0.25 | + Mon Jan 01 07:00:00 2018 PST | 1 | 0 | 0 +(5 rows) + +-- interpolation with correlated subquery lookup after interval +SELECT + time_bucket_gapfill('1h'::interval,time,'2018-01-01 5:00 PST'::timestamptz, '2018-01-01 9:00 PST'::timestamptz), + device_id, + interpolate( + avg(v1), + next=>(SELECT (time,v2::float) FROM metrics_tstz m2 WHERE m1.device_id=m2.device_id ORDER BY time LIMIT 1) + ),avg(v1) +FROM metrics_tstz m1 WHERE device_id=1 GROUP BY 1,2 ORDER BY 1,2; + time_bucket_gapfill | device_id | interpolate | avg +------------------------------+-----------+-------------+----- + Mon Jan 01 05:00:00 2018 PST | 1 | 0.5 | 0.5 + Mon Jan 01 06:00:00 2018 PST | 1 | 0.25 | + Mon Jan 01 07:00:00 2018 PST | 1 | 0 | 0 + Mon Jan 01 08:00:00 2018 PST | 1 | -5 | +(4 rows) + +\set ON_ERROR_STOP 0 +-- bucket_width non simple expression +SELECT + time_bucket_gapfill(t,t) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: bucket_width must be a simple expression +-- no start/finish and no usable time constraints +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- NULL start/finish and no usable time constraints +SELECT + time_bucket_gapfill(1,t,NULL,NULL) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- no start and no usable time constraints +SELECT + time_bucket_gapfill(1,t,finish:=1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- NULL start expression and no usable time constraints +SELECT + time_bucket_gapfill(1,t,CASE WHEN length(version())>0 THEN NULL::int ELSE NULL::int END,1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. +-- unsupported start expression and no usable time constraints +SELECT + time_bucket_gapfill(1,t,t,1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start must be a simple expression +-- NULL start and no usable time constraints +SELECT + time_bucket_gapfill(1,t,NULL,1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- NULL finish expression and no usable time constraints +SELECT + time_bucket_gapfill(1,t,1,CASE WHEN length(version())>0 THEN NULL::int ELSE NULL::int END) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: finish cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. 
+-- unsupported finish expression and no usable time constraints +SELECT + time_bucket_gapfill(1,t,1,t) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: finish must be a simple expression +-- no finish and no usable time constraints +SELECT + time_bucket_gapfill(1,t,1) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer finish from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- NULL finish and no usable time constraints +SELECT + time_bucket_gapfill(1,t,1,NULL) +FROM (VALUES (1),(2)) v(t) +WHERE true AND true +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer finish from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- expression with column reference on right side +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE t > t AND t < 2 +GROUP BY 1; +ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause +HINT: Specify start and finish as arguments or in the WHERE clause. +-- expression with cast +SELECT + time_bucket_gapfill(1,t1::int8) +FROM (VALUES (1,2),(2,2)) v(t1,t2) +WHERE t1 >= 1 AND t1 <= 2 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts needs to refer to a single column if no start or finish is supplied +HINT: Specify start and finish as arguments or in the WHERE clause. +-- expression with multiple column references +SELECT + time_bucket_gapfill(1,t1+t2) +FROM (VALUES (1,2),(2,2)) v(t1,t2) +WHERE t1 > 1 AND t1 < 2 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts needs to refer to a single column if no start or finish is supplied +HINT: Specify start and finish as arguments or in the WHERE clause. +-- expression with NULL start in WHERE clause, we use CASE to wrap the NULL so it doesnt get folded +SELECT + time_bucket_gapfill(1,t1) +FROM (VALUES (1,2),(2,2)) v(t1,t2) +WHERE t1 > CASE WHEN length(version()) > 0 THEN NULL::int ELSE NULL::int END AND t1 < 4 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. +-- expression with NULL finish in WHERE clause, we use CASE to wrap the NULL so it doesnt get folded +SELECT + time_bucket_gapfill(1,t1) +FROM (VALUES (1,2),(2,2)) v(t1,t2) +WHERE t1 > 0 AND t1 < CASE WHEN length(version()) > 0 THEN NULL::int ELSE NULL::int END +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: finish cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. +-- non-Const NULL as start argument, we use CASE to wrap the NULL so it doesnt get folded +SELECT + time_bucket_gapfill(1,t1,CASE WHEN length(version())>0 THEN NULL::int ELSE NULL::int END) +FROM (VALUES (1,2),(2,2)) v(t1,t2) +WHERE t1 > 0 AND t1 < 2 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: start cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. +-- non-Const NULL as finish argument, we use CASE to wrap the NULL so it doesnt get folded +SELECT + time_bucket_gapfill(1,t1,NULL,CASE WHEN length(version())>0 THEN NULL::int ELSE NULL::int END) +FROM (VALUES (1,2),(2,2)) v(t1,t2) +WHERE t1 > 0 AND t1 < 2 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: finish cannot be NULL +HINT: Specify start and finish as arguments or in the WHERE clause. 
+-- time_bucket_gapfill with constraints ORed +SELECT + time_bucket_gapfill(1::int8,t::int8) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= -1 OR t < 3 +GROUP BY 1; +ERROR: invalid time_bucket_gapfill argument: ts needs to refer to a single column if no start or finish is supplied +HINT: Specify start and finish as arguments or in the WHERE clause. +\set ON_ERROR_STOP 1 +-- int32 time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= -1 AND t < 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 +(4 rows) + +-- same query with less or equal as finish +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= -1 AND t <= 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 + 3 +(5 rows) + +-- int32 time_bucket_gapfill with start column and value switched +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + -1 < t AND t < 3 +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 +(3 rows) + +-- int32 time_bucket_gapfill with finish column and value switched +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= 0 AND 3 >= t +GROUP BY 1; + time_bucket_gapfill + 0 + 1 + 2 + 3 +(4 rows) + +-- int16 time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill(1::int2,t) +FROM (VALUES (1::int2),(2::int2)) v(t) +WHERE + t >= -1 AND t < 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 +(4 rows) + +-- int64 time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill(1::int8,t) +FROM (VALUES (1::int8),(2::int8)) v(t) +WHERE + t >= -1 AND t < 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 +(4 rows) + +-- date time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill('1d'::interval,t) +FROM (VALUES ('1999-12-30'::date),('2000-01-01'::date)) v(t) +WHERE + t >= '1999-12-29' AND t < '2000-01-03' +GROUP BY 1; + time_bucket_gapfill + 12-29-1999 + 12-30-1999 + 12-31-1999 + 01-01-2000 + 01-02-2000 +(5 rows) + +-- timestamp time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill('12h'::interval,t) +FROM (VALUES ('1999-12-30'::timestamp),('2000-01-01'::timestamp)) v(t) +WHERE + t >= '1999-12-29' AND t < '2000-01-03' +GROUP BY 1; + time_bucket_gapfill + Wed Dec 29 00:00:00 1999 + Wed Dec 29 12:00:00 1999 + Thu Dec 30 00:00:00 1999 + Thu Dec 30 12:00:00 1999 + Fri Dec 31 00:00:00 1999 + Fri Dec 31 12:00:00 1999 + Sat Jan 01 00:00:00 2000 + Sat Jan 01 12:00:00 2000 + Sun Jan 02 00:00:00 2000 + Sun Jan 02 12:00:00 2000 +(10 rows) + +-- timestamptz time_bucket_gapfill with no start/finish +SELECT + time_bucket_gapfill('12h'::interval,t) +FROM (VALUES ('1999-12-30'::timestamptz),('2000-01-01'::timestamptz)) v(t) +WHERE + t >= '1999-12-29' AND t < '2000-01-03' +GROUP BY 1; + time_bucket_gapfill + Tue Dec 28 16:00:00 1999 PST + Wed Dec 29 04:00:00 1999 PST + Wed Dec 29 16:00:00 1999 PST + Thu Dec 30 04:00:00 1999 PST + Thu Dec 30 16:00:00 1999 PST + Fri Dec 31 04:00:00 1999 PST + Fri Dec 31 16:00:00 1999 PST + Sat Jan 01 04:00:00 2000 PST + Sat Jan 01 16:00:00 2000 PST + Sun Jan 02 04:00:00 2000 PST + Sun Jan 02 16:00:00 2000 PST +(11 rows) + +-- timestamptz time_bucket_gapfill with more complex expression +SELECT + time_bucket_gapfill('12h'::interval,t) +FROM (VALUES ('1999-12-30'::timestamptz),('2000-01-01'::timestamptz)) v(t) +WHERE + t >= '2000-01-03'::timestamptz - '4d'::interval AND t < '2000-01-03' +GROUP BY 1; + time_bucket_gapfill + Wed Dec 29 16:00:00 1999 PST + Thu Dec 30 04:00:00 1999 PST + Thu Dec 30 16:00:00 1999 PST + Fri Dec 31 
04:00:00 1999 PST + Fri Dec 31 16:00:00 1999 PST + Sat Jan 01 04:00:00 2000 PST + Sat Jan 01 16:00:00 2000 PST + Sun Jan 02 04:00:00 2000 PST + Sun Jan 02 16:00:00 2000 PST +(9 rows) + +-- timestamptz time_bucket_gapfill with different datatype in finish constraint +SELECT + time_bucket_gapfill('12h'::interval,t) +FROM (VALUES ('1999-12-30'::timestamptz),('2000-01-01'::timestamptz)) v(t) +WHERE + t >= '2000-01-03'::timestamptz - '4d'::interval AND t < '2000-01-03'::date +GROUP BY 1; + time_bucket_gapfill + Wed Dec 29 16:00:00 1999 PST + Thu Dec 30 04:00:00 1999 PST + Thu Dec 30 16:00:00 1999 PST + Fri Dec 31 04:00:00 1999 PST + Fri Dec 31 16:00:00 1999 PST + Sat Jan 01 04:00:00 2000 PST + Sat Jan 01 16:00:00 2000 PST + Sun Jan 02 04:00:00 2000 PST + Sun Jan 02 16:00:00 2000 PST +(9 rows) + +-- time_bucket_gapfill with now() as start +SELECT + time_bucket_gapfill('1h'::interval,t) +FROM (VALUES (now()),(now())) v(t) +WHERE + t >= now() AND t < now() - '1h'::interval +GROUP BY 1; + time_bucket_gapfill +(0 rows) + +-- time_bucket_gapfill with multiple constraints +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t >= -1 AND t < 3 and t>1 AND t <=4 AND length(version()) > 0 +GROUP BY 1; + time_bucket_gapfill + 2 +(1 row) + +-- int32 time_bucket_gapfill with greater for start +SELECT + time_bucket_gapfill(1,t) +FROM (VALUES (1),(2)) v(t) +WHERE + t > -2 AND t < 3 +GROUP BY 1; + time_bucket_gapfill + -1 + 0 + 1 + 2 +(4 rows) + +-- test DISTINCT +SELECT DISTINCT ON (color) + time_bucket_gapfill(1,time,0,5) as time, + color, + min(value) as m +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color ORDER BY 2,1; + time | color | m +------+-------+--- + 0 | blue | + 0 | red | +(2 rows) + +-- test DISTINCT with window functions +SELECT DISTINCT ON (row_number() OVER ()) + time_bucket_gapfill(1,time,0,5) as time, + color, + row_number() OVER () +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color; + time | color | row_number +------+-------+------------ + 0 | blue | 1 + 1 | blue | 2 + 2 | blue | 3 + 3 | blue | 4 + 4 | blue | 5 + 0 | red | 6 + 1 | red | 7 + 2 | red | 8 + 3 | red | 9 + 4 | red | 10 +(10 rows) + +-- test DISTINCT with window functions and PARTITION BY +SELECT DISTINCT ON (color,row_number() OVER (PARTITION BY color)) + time_bucket_gapfill(1,time,0,5) as time, + color, + row_number() OVER (PARTITION BY color) +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color; + time | color | row_number +------+-------+------------ + 0 | blue | 1 + 1 | blue | 2 + 2 | blue | 3 + 3 | blue | 4 + 4 | blue | 5 + 0 | red | 1 + 1 | red | 2 + 2 | red | 3 + 3 | red | 4 + 4 | red | 5 +(10 rows) + +-- test DISTINCT with window functions not in targetlist +SELECT DISTINCT ON (row_number() OVER ()) + time_bucket_gapfill(1,time,0,5) as time, + color, + row_number() OVER (PARTITION BY color) +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 1,color; + time | color | row_number +------+-------+------------ + 0 | blue | 1 + 1 | blue | 2 + 2 | blue | 3 + 3 | blue | 4 + 4 | blue | 5 + 0 | red | 1 + 1 | red | 2 + 2 | red | 3 + 3 | red | 4 + 4 | red | 5 +(10 rows) + +-- test column references +SELECT + row_number() OVER (PARTITION BY color), + locf(min(time)), + color, + time_bucket_gapfill(1,time,0,5) as time +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 3,4; + row_number | locf | color | time +------------+------+-------+------ + 1 | | blue | 0 + 2 | 1 | blue | 1 + 3 | 1 | blue | 2 + 4 
| 1 | blue | 3 + 5 | 1 | blue | 4 + 1 | | red | 0 + 2 | | red | 1 + 3 | 2 | red | 2 + 4 | 2 | red | 3 + 5 | 2 | red | 4 +(10 rows) + +-- test with Nested Loop +SELECT l.id, bucket, data_value FROM + (VALUES (1), (2), (3), (4)) a(id) + INNER JOIN LATERAL ( + SELECT b.id id, time_bucket_gapfill('1'::int, time, start=>'1'::int, finish=> '5'::int) bucket, locf(last(data, time)) data_value + FROM (VALUES (1, 1, 1), (1, 4, 4), (2, 1, -1), (2, 4, -4)) b(id, time, data) + WHERE a.id = b.id + GROUP BY b.id, bucket + ) as l on (true); + id | bucket | data_value +----+--------+------------ + 1 | 1 | 1 + 1 | 2 | 1 + 1 | 3 | 1 + 1 | 4 | 4 + 2 | 1 | -1 + 2 | 2 | -1 + 2 | 3 | -1 + 2 | 4 | -4 +(8 rows) + +-- test prepared statement +PREPARE prep_gapfill AS +SELECT + time_bucket_gapfill(1,time,0,5) as time, + locf(min(value)) +FROM (VALUES (1,1),(2,2)) v(time,value) +GROUP BY 1; +-- execute 10 times to make sure turning it into generic plan works +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +EXECUTE prep_gapfill; + time | locf +------+------ + 0 | + 1 | 1 + 2 | 2 + 3 | 2 + 4 | 2 +(5 rows) + +DEALLOCATE prep_gapfill; +-- test column references with TIME_COLUMN last +SELECT + row_number() OVER (PARTITION BY color), + locf(min(time)), + color, + time_bucket_gapfill(1,time,0,5) as time +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 3,4; + row_number | locf | color | time +------------+------+-------+------ + 1 | | blue | 0 + 2 | 1 | blue | 1 + 3 | 1 | blue | 2 + 4 | 1 | blue | 3 + 5 | 1 | blue | 4 + 1 | | red | 0 + 2 | | red | 1 + 3 | 2 | red | 2 + 4 | 2 | red | 3 + 5 | 2 | red | 4 +(10 rows) + +-- test expressions on GROUP BY columns +SELECT + row_number() OVER (PARTITION BY color), + locf(min(time)), + color, + length(color), + time_bucket_gapfill(1,time,0,5) as time +FROM (VALUES (1,'blue',1),(2,'red',2)) v(time,color,value) +GROUP BY 3,5; + row_number | locf | color | length | time +------------+------+-------+--------+------ + 1 | | blue | 4 | 0 + 2 | 1 | blue | 4 | 1 + 3 | 1 | blue | 4 | 2 + 4 | 1 | blue | 4 | 3 + 5 | 1 | blue | 4 | 4 + 1 | | red | 3 | 0 + 2 | | red | 3 | 1 + 3 | 2 | red | 3 | 2 + 4 | 2 | red | 3 | 3 + 5 | 2 | red | 3 | 4 +(10 rows) + +-- test columns derived from GROUP BY columns with cast +SELECT + time_bucket_gapfill(1,time,0,5) as time, + device_id::text +FROM (VALUES (1,1),(2,2)) v(time,device_id) +GROUP BY 1,device_id; + time | device_id +------+----------- + 0 | 1 + 1 | 1 + 2 | 1 + 3 | 1 + 4 | 1 + 0 | 2 + 1 | 2 + 2 | 2 + 3 | 2 + 4 | 2 +(10 rows) + +-- test columns derived from GROUP BY columns with expression +SELECT + time_bucket_gapfill(1,time,0,5) as time, + 'Device ' || device_id::text +FROM 
(VALUES (1,1),(2,2)) v(time,device_id) +GROUP BY 1,device_id; + time | ?column? +------+---------- + 0 | Device 1 + 1 | Device 1 + 2 | Device 1 + 3 | Device 1 + 4 | Device 1 + 0 | Device 2 + 1 | Device 2 + 2 | Device 2 + 3 | Device 2 + 4 | Device 2 +(10 rows) + +--test interpolation with big differences in values (test overflows in calculations) +--we use the biggest possible difference in time(x) and the value(y). +--For bigints we also test values of smaller than bigintmax/min to avoid +--the symmetry where x=y (which catches more errors) +SELECT 9223372036854775807 as big_int_max \gset +SELECT -9223372036854775808 as big_int_min \gset +SELECT + time_bucket_gapfill(1,time,0,1) AS time, + interpolate(min(s)) AS "smallint", + interpolate(min(i)) AS "int", + interpolate(min(b)) AS "bigint", + interpolate(min(b2)) AS "bigint2", + interpolate(min(d)) AS "double" +FROM (values (:big_int_min,(-32768)::smallint,(-2147483648)::int,:big_int_min,-2147483648::bigint, '-Infinity'::double precision), + (:big_int_max, 32767::smallint, 2147483647::int,:big_int_max, 2147483647::bigint, 'Infinity'::double precision)) v(time,s,i,b,b2,d) +GROUP BY 1 ORDER BY 1; + time | smallint | int | bigint | bigint2 | double +----------------------+----------+-------------+----------------------+-------------+----------- + -9223372036854775808 | -32768 | -2147483648 | -9223372036854775808 | -2147483648 | -Infinity + 0 | 0 | 0 | 0 | 0 | Infinity + 9223372036854775807 | 32767 | 2147483647 | 9223372036854775807 | 2147483647 | Infinity +(3 rows) + +-- issue #2232: This query used to trigger error "could not find +-- pathkey item to sort" due to a corrupt query plan +SELECT time_bucket_gapfill('1 h', time) AS time, + locf(sum(v1)) AS v1_sum, + interpolate(sum(v2)) AS v2_sum +FROM metrics_tstz +WHERE time >= '2018-01-01 04:00' AND time < '2018-01-01 08:00' +GROUP BY 1 +ORDER BY 1 DESC; + time | v1_sum | v2_sum +------------------------------+--------+-------- + Mon Jan 01 07:00:00 2018 PST | 2.3 | 70 + Mon Jan 01 06:00:00 2018 PST | 2.1 | 65 + Mon Jan 01 05:00:00 2018 PST | 2.1 | 60 + Mon Jan 01 04:00:00 2018 PST | | +(4 rows) + +-- query without gapfill: +SELECT time_bucket('1 h', time) AS time, + sum(v1) AS v1_sum, + sum(v2) AS v1_sum +FROM metrics_tstz +WHERE time >= '2018-01-01 04:00' AND time < '2018-01-01 08:00' +GROUP BY 1 +ORDER BY 1 DESC; + time | v1_sum | v1_sum +------------------------------+--------+-------- + Mon Jan 01 07:00:00 2018 PST | 2.3 | 70 + Mon Jan 01 05:00:00 2018 PST | 2.1 | 60 +(2 rows) + +-- query to show original data +SELECT * FROM metrics_tstz +WHERE time >= '2018-01-01 04:00' AND time < '2018-01-01 08:00' +ORDER BY 1 DESC, 2; + time | device_id | v1 | v2 +------------------------------+-----------+-----+---- + Mon Jan 01 07:00:00 2018 PST | 1 | 0 | 0 + Mon Jan 01 07:00:00 2018 PST | 2 | 1.4 | 40 + Mon Jan 01 07:00:00 2018 PST | 3 | 0.9 | 30 + Mon Jan 01 05:00:00 2018 PST | 1 | 0.5 | 10 + Mon Jan 01 05:00:00 2018 PST | 2 | 0.7 | 20 + Mon Jan 01 05:00:00 2018 PST | 3 | 0.9 | 30 +(6 rows) + +-- issue #3048 +-- test gapfill/hashagg planner interaction +-- this used to produce a plan without gapfill node +EXPLAIN (costs off) SELECT time_bucket_gapfill('52w', time, start:='2000-01-01', finish:='2000-01-10') AS time, + sum(v1) AS v1_sum +FROM metrics +GROUP BY 1; +QUERY PLAN + Custom Scan (GapFill) + -> Sort + Sort Key: (time_bucket_gapfill('@ 364 days'::interval, _hyper_X_X_chunk."time", 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone, 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time 
zone)) + -> HashAggregate + Group Key: time_bucket_gapfill('@ 364 days'::interval, _hyper_X_X_chunk."time", 'Sat Jan 01 00:00:00 2000 PST'::timestamp with time zone, 'Mon Jan 10 00:00:00 2000 PST'::timestamp with time zone) + -> Result + -> Append + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk + -> Seq Scan on _hyper_X_X_chunk +(10 rows) + +-- issue #3834 +-- test projection handling in gapfill +CREATE TABLE i3834(time timestamptz NOT NULL, ship_id int, value float); +SELECT table_name FROM create_hypertable('i3834','time'); + table_name + i3834 +(1 row) + +INSERT INTO i3834 VALUES ('2020-12-01 14:05:00+01',1,3.123), ('2020-12-01 14:05:00+01',2,4.123), ('2020-12-01 14:05:00+01',3,5.123); +SELECT + time_bucket_gapfill('30000 ms'::interval, time) AS time, + ship_id, + interpolate (avg(value)), + 'speedlog' AS source +FROM + i3834 +WHERE + ship_id IN (1, 2) + AND time >= '2020-12-01 14:05:00+01' + AND time < '2020-12-01 14:10:00+01' +GROUP BY 1,2; + time | ship_id | interpolate | source +------------------------------+---------+-------------+---------- + Tue Dec 01 05:05:00 2020 PST | 1 | 3.123 | speedlog + Tue Dec 01 05:05:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:06:00 2020 PST | 1 | | speedlog + Tue Dec 01 05:06:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:07:00 2020 PST | 1 | | speedlog + Tue Dec 01 05:07:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:08:00 2020 PST | 1 | | speedlog + Tue Dec 01 05:08:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:09:00 2020 PST | 1 | | speedlog + Tue Dec 01 05:09:30 2020 PST | 1 | | speedlog + Tue Dec 01 05:05:00 2020 PST | 2 | 4.123 | speedlog + Tue Dec 01 05:05:30 2020 PST | 2 | | speedlog + Tue Dec 01 05:06:00 2020 PST | 2 | | speedlog + Tue Dec 01 05:06:30 2020 PST | 2 | | speedlog + Tue Dec 01 05:07:00 2020 PST | 2 | | speedlog + Tue Dec 01 05:07:30 2020 PST | 2 | | speedlog + Tue Dec 01 05:08:00 2020 PST | 2 | | speedlog + Tue Dec 01 05:08:30 2020 PST | 2 | | speedlog + Tue Dec 01 05:09:00 2020 PST | 2 | | speedlog + Tue Dec 01 05:09:30 2020 PST | 2 | | speedlog +(20 rows) + +DROP TABLE i3834; +-- issue #1528 +-- test float rounding for certain float values when start and end are identical +SELECT + time_bucket_gapfill('1min'::interval, ts::timestamptz, start:='2019-11-05 2:20', finish:='2019-11-05 2:30'), + interpolate(avg(20266.959547::float4)) AS float4, + interpolate(avg(20266.959547::float8)) AS float8 +FROM (VALUES ('2019-11-05 2:20'), ('2019-11-05 2:30')) v (ts) +GROUP BY 1; + time_bucket_gapfill | float4 | float8 +------------------------------+-----------------+-------------- + Tue Nov 05 02:20:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:21:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:22:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:23:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:24:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:25:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:26:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:27:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:28:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:29:00 2019 PST | 20266.958984375 | 20266.959547 + Tue Nov 05 02:30:00 2019 PST | 20266.958984375 | 20266.959547 +(11 rows) + +-- check gapfill group change detection with TOASTed values +CREATE TABLE gapfill_group_toast(time timestamptz NOT NULL, device text, value float); +SELECT table_name FROM create_hypertable('gapfill_group_toast', 'time'); + table_name + 
gapfill_group_toast +(1 row) + +INSERT INTO gapfill_group_toast +SELECT + generate_series('2022-06-01'::timestamptz, '2022-06-03'::timestamptz, '1min'::interval), + '4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43', + random(); +ALTER TABLE gapfill_group_toast SET(timescaledb.compress, timescaledb.compress_segmentby = 'device'); +SELECT count(compress_chunk(c)) FROM show_chunks('gapfill_group_toast') c; + count + 2 +(1 row) + +SELECT + time_bucket_gapfill('1 day', time), device +FROM gapfill_group_toast +WHERE time >= '2022-06-01' AND time <= '2022-06-02' +GROUP BY 1,2; + time_bucket_gapfill | device +------------------------------+------------------------------------------------------------------ + Tue May 31 17:00:00 2022 PDT | 4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43 + Wed Jun 01 17:00:00 2022 PDT | 4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43 +(2 rows) + +DROP TABLE gapfill_group_toast; +-- test bucketing by month +SELECT time_bucket_gapfill('2 month'::interval, ts, '2000-01-01'::timestamptz,'2001-01-01'::timestamptz) FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Fri Dec 31 16:00:00 1999 PST + Tue Feb 29 16:00:00 2000 PST + Sun Apr 30 17:00:00 2000 PDT + Fri Jun 30 17:00:00 2000 PDT + Thu Aug 31 17:00:00 2000 PDT + Tue Oct 31 16:00:00 2000 PST + Sun Dec 31 16:00:00 2000 PST +(7 rows) + +SELECT time_bucket_gapfill('1 year'::interval, ts, '2000-01-01'::timestamptz,'2003-01-01'::timestamptz) FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Fri Dec 31 16:00:00 1999 PST + Sun Dec 31 16:00:00 2000 PST + Mon Dec 31 16:00:00 2001 PST + Tue Dec 31 16:00:00 2002 PST +(4 rows) + +SELECT time_bucket_gapfill('1 century'::interval, ts, '1900-01-01'::timestamptz,'2103-01-01'::timestamptz) FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Sun Dec 31 16:00:00 1899 PST + Fri Dec 31 16:00:00 1999 PST + Thu Dec 31 16:00:00 2099 PST +(3 rows) + +-- test bucketing with timezone +SELECT time_bucket_gapfill('2 month'::interval, ts, 'Europe/Berlin', '2000-01-01','2001-01-01') FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Fri Dec 31 15:00:00 1999 PST + Tue Feb 29 15:00:00 2000 PST + Sat Apr 29 15:00:00 2000 PDT + Thu Jun 29 15:00:00 2000 PDT + Tue Aug 29 15:00:00 2000 PDT + Sun Oct 29 15:00:00 2000 PST + Fri Dec 29 15:00:00 2000 PST +(7 rows) + +SELECT time_bucket_gapfill('2 month'::interval, ts, current_setting('timezone'), '2000-01-01','2001-01-01') FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Sat Jan 01 00:00:00 2000 PST + Wed Mar 01 00:00:00 2000 PST + Mon May 01 00:00:00 2000 PDT + Sat Jul 01 00:00:00 2000 PDT + Fri Sep 01 00:00:00 2000 PDT + Wed Nov 01 00:00:00 2000 PST +(6 rows) + +SELECT time_bucket_gapfill('2 month'::interval, ts, 'UTC', '2000-01-01','2001-01-01') FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Fri Dec 31 16:00:00 1999 PST + Tue Feb 29 16:00:00 2000 PST + Sat Apr 29 16:00:00 2000 PDT + Thu Jun 29 16:00:00 2000 PDT + Tue Aug 29 16:00:00 2000 PDT + Sun Oct 29 16:00:00 2000 PST + Fri Dec 29 16:00:00 2000 PST +(7 rows) + +SET timezone TO 'Europe/Berlin'; +SELECT time_bucket_gapfill('2 month'::interval, ts, 'Europe/Berlin', '2000-01-01','2001-01-01') FROM (VALUES ('2000-03-01'::timestamptz)) v(ts) GROUP BY 1; + time_bucket_gapfill + Sat Jan 01 00:00:00 2000 CET + Wed Mar 01 00:00:00 2000 CET + Mon May 01 00:00:00 
2000 CEST + Sat Jul 01 00:00:00 2000 CEST + Fri Sep 01 00:00:00 2000 CEST + Wed Nov 01 00:00:00 2000 CET +(6 rows) + +RESET timezone; +DROP INDEX gapfill_plan_test_indx; +-- Test gapfill with arrays (#5981) +SELECT time_bucket_gapfill(5, ts, 1, 100) as ts, int_arr, locf(last(value, ts)) +FROM ( + SELECT ARRAY[1,2,3,4]::int[] as int_arr, x as ts, x+500000 as value + FROM generate_series(1, 10, 100) as x + ) t +GROUP BY 1, 2 + ts | int_arr | locf +----+-----------+-------- + 0 | {1,2,3,4} | 500001 + 5 | {1,2,3,4} | 500001 + 10 | {1,2,3,4} | 500001 + 15 | {1,2,3,4} | 500001 + 20 | {1,2,3,4} | 500001 + 25 | {1,2,3,4} | 500001 + 30 | {1,2,3,4} | 500001 + 35 | {1,2,3,4} | 500001 + 40 | {1,2,3,4} | 500001 + 45 | {1,2,3,4} | 500001 + 50 | {1,2,3,4} | 500001 + 55 | {1,2,3,4} | 500001 + 60 | {1,2,3,4} | 500001 + 65 | {1,2,3,4} | 500001 + 70 | {1,2,3,4} | 500001 + 75 | {1,2,3,4} | 500001 + 80 | {1,2,3,4} | 500001 + 85 | {1,2,3,4} | 500001 + 90 | {1,2,3,4} | 500001 + 95 | {1,2,3,4} | 500001 +(20 rows) + diff --git a/tsl/test/shared/sql/.gitignore b/tsl/test/shared/sql/.gitignore index ca69145ab79..1e6053ee040 100644 --- a/tsl/test/shared/sql/.gitignore +++ b/tsl/test/shared/sql/.gitignore @@ -4,6 +4,7 @@ /dist_fetcher_type-*.sql /dist_remote_error-*.sql /dist_remote_error.text +/gapfill-*.sql /generated_columns-*.sql /ordered_append-*.sql /ordered_append_join-*.sql diff --git a/tsl/test/shared/sql/CMakeLists.txt b/tsl/test/shared/sql/CMakeLists.txt index 6c6ca32042d..a7f485d90b2 100644 --- a/tsl/test/shared/sql/CMakeLists.txt +++ b/tsl/test/shared/sql/CMakeLists.txt @@ -7,12 +7,12 @@ set(TEST_FILES_SHARED constraint_exclusion_prepared.sql decompress_join.sql decompress_placeholdervar.sql - gapfill.sql subtract_integer_from_now.sql) set(TEST_TEMPLATES_SHARED - generated_columns.sql.in ordered_append.sql.in ordered_append_join.sql.in - transparent_decompress_chunk.sql.in space_constraint.sql.in) + generated_columns.sql.in gapfill.sql.in ordered_append.sql.in + ordered_append_join.sql.in transparent_decompress_chunk.sql.in + space_constraint.sql.in) if((${PG_VERSION_MAJOR} GREATER_EQUAL "14")) list(APPEND TEST_FILES_SHARED compression_dml.sql decompress_tracking.sql diff --git a/tsl/test/shared/sql/gapfill.sql b/tsl/test/shared/sql/gapfill.sql.in similarity index 100% rename from tsl/test/shared/sql/gapfill.sql rename to tsl/test/shared/sql/gapfill.sql.in diff --git a/tsl/test/sql/.gitignore b/tsl/test/sql/.gitignore index 7c80876e95a..dd66d992c90 100644 --- a/tsl/test/sql/.gitignore +++ b/tsl/test/sql/.gitignore @@ -1,7 +1,6 @@ /*.pgbinary /cagg_bgw-*.sql /cagg_ddl-*.sql -/cagg_ddl_dist_ht-*.sql /cagg_errors_deprecated-*.sql /cagg_invalidation_dist_ht-*.sql /cagg_permissions-*.sql @@ -28,7 +27,6 @@ /modify_exclusion-*.sql /plan_skip_scan-*.sql /remote-copy-*sv -/telemetry_stats-*.sql /transparent_decompression-*.sql /transparent_decompression_ordered_index-*.sql /merge_append_partially_compressed-*.sql diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index f7cc6c1726f..a30fb89e934 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -30,7 +30,7 @@ set(TEST_FILES skip_scan.sql size_utils_tsl.sql) -if(ENABLE_MULTINODE_TESTS) +if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") list(APPEND TEST_FILES dist_param.sql dist_views.sql) endif() @@ -71,11 +71,7 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) cagg_on_cagg_joins.sql cagg_tableam.sql cagg_policy_run.sql - data_fetcher.sql - data_node_bootstrap.sql - data_node.sql ddl_hook.sql - debug_notice.sql 
decompress_vector_qual.sql hypertable_generalization.sql insert_memory_usage.sql @@ -88,15 +84,23 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) recompress_chunk_segmentwise.sql transparent_decompression_join_index.sql feature_flags.sql) + if(USE_TELEMETRY) + list(APPEND TEST_FILES telemetry_stats.sql) + endif() - if(ENABLE_MULTINODE_TESTS) + if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") list( APPEND TEST_FILES cagg_bgw_dist_ht.sql + cagg_ddl_dist_ht.sql cagg_migrate_dist_ht.sql cagg_on_cagg_dist_ht.sql cagg_on_cagg_joins_dist_ht.sql + data_fetcher.sql + data_node_bootstrap.sql + data_node.sql + debug_notice.sql dist_api_calls.sql dist_commands.sql dist_compression.sql @@ -148,11 +152,8 @@ set(SOLO_TESTS compress_bgw_reorder_drop_chunks compression_ddl cagg_bgw - cagg_bgw_dist_ht - cagg_ddl - cagg_ddl_dist_ht + cagg_ddl-${PG_VERSION_MAJOR} cagg_dump - data_fetcher dist_util move remote_connection_cache @@ -163,10 +164,16 @@ set(SOLO_TESTS telemetry_stats-${PG_VERSION_MAJOR}) # In PG versions 15.0 to 15.2, dist_move_chunk can cause a deadlock when run in # parallel with other tests as mentioned in #4972. -if(${PG_VERSION_MAJOR} EQUAL "15" AND ${PG_VERSION_MINOR} LESS "3") +if(ENABLE_MULTINODE_TESTS + AND ${PG_VERSION_MAJOR} EQUAL "15" + AND ${PG_VERSION_MINOR} LESS "3") list(APPEND SOLO_TESTS dist_move_chunk) endif() +if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") + list(APPEND SOLO_TESTS cagg_bgw_dist_ht data_fetcher) +endif() + set(TEST_TEMPLATES compression_sorted_merge.sql.in cagg_union_view.sql.in @@ -194,11 +201,10 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) continuous_aggs.sql.in continuous_aggs_deprecated.sql.in deparse.sql.in) - if(ENABLE_MULTINODE_TESTS) + if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") list( APPEND TEST_TEMPLATES - cagg_ddl_dist_ht.sql.in cagg_invalidation_dist_ht.sql.in dist_hypertable.sql.in dist_grant.sql.in @@ -207,9 +213,6 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) dist_partial_agg.sql.in dist_query.sql.in) endif() - if(USE_TELEMETRY) - list(APPEND TEST_TEMPLATES telemetry_stats.sql.in) - endif() endif(CMAKE_BUILD_TYPE MATCHES Debug) # Check if PostgreSQL was compiled with JIT support diff --git a/tsl/test/sql/cagg_ddl_dist_ht.sql.in b/tsl/test/sql/cagg_ddl_dist_ht.sql similarity index 100% rename from tsl/test/sql/cagg_ddl_dist_ht.sql.in rename to tsl/test/sql/cagg_ddl_dist_ht.sql diff --git a/tsl/test/sql/chunk_api.sql b/tsl/test/sql/chunk_api.sql index 83ddd3b55cf..ac0a6ec5f18 100644 --- a/tsl/test/sql/chunk_api.sql +++ b/tsl/test/sql/chunk_api.sql @@ -3,10 +3,6 @@ -- LICENSE-TIMESCALE for a copy of the license. 
\c :TEST_DBNAME :ROLE_SUPERUSER -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 - -\ir include/remote_exec.sql GRANT CREATE ON DATABASE :"TEST_DBNAME" TO :ROLE_DEFAULT_PERM_USER; SET ROLE :ROLE_DEFAULT_PERM_USER; @@ -137,126 +133,6 @@ FROM pg_stats WHERE tablename IN FROM show_chunks('chunkapi')) ORDER BY tablename, attname; --- Test getting chunk stats on a distribute hypertable -SET ROLE :ROLE_CLUSTER_SUPERUSER; - -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2')) v(name) -) a; - -GRANT USAGE - ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2 - TO :ROLE_1, :ROLE_DEFAULT_PERM_USER; --- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes -GRANT CREATE ON SCHEMA public TO :ROLE_1; - -SET ROLE :ROLE_1; -CREATE TABLE disttable (time timestamptz, device int, temp float, color text); -SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device'); -INSERT INTO disttable VALUES ('2018-01-01 05:00:00-8', 1, 23.4, 'green'), - ('2018-01-01 06:00:00-8', 4, 22.3, NULL), - ('2018-01-01 06:00:00-8', 1, 21.1, 'green'); - --- Make sure we get deterministic behavior across all nodes -CALL distributed_exec($$ SELECT setseed(1); $$); - --- No stats on the local table -SELECT * FROM _timescaledb_functions.get_chunk_relstats('disttable'); -SELECT * FROM _timescaledb_functions.get_chunk_colstats('disttable'); - -SELECT relname, reltuples, relpages, relallvisible FROM pg_class WHERE relname IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY relname; -SELECT * FROM pg_stats WHERE tablename IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY 1,2,3; - --- Run ANALYZE on data node 1 -CALL distributed_exec('ANALYZE disttable', ARRAY[:'DATA_NODE_1']); - --- Stats should now be refreshed after running get_chunk_{col,rel}stats -SELECT relname, reltuples, relpages, relallvisible FROM pg_class WHERE relname IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY relname; -SELECT * FROM pg_stats WHERE tablename IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY 1,2,3; - -SELECT * FROM _timescaledb_functions.get_chunk_relstats('disttable'); -SELECT * FROM _timescaledb_functions.get_chunk_colstats('disttable'); - -SELECT relname, reltuples, relpages, relallvisible FROM pg_class WHERE relname IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY relname; - -SELECT * FROM pg_stats WHERE tablename IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY 1,2,3; - --- Test that user without table permissions can't get column stats -SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT * FROM _timescaledb_functions.get_chunk_colstats('disttable'); -SET ROLE :ROLE_1; - --- Run ANALYZE again, but on both nodes. -ANALYZE disttable; - --- Now expect stats from all data node chunks -SELECT * FROM _timescaledb_functions.get_chunk_relstats('disttable'); -SELECT * FROM _timescaledb_functions.get_chunk_colstats('disttable'); - --- Test ANALYZE with a replica chunk. 
We'd like to ensure the --- stats-fetching functions handle duplicate stats from different (but --- identical) replica chunks. -SELECT set_replication_factor('disttable', 2); -INSERT INTO disttable VALUES ('2019-01-01 05:00:00-8', 1, 23.4, 'green'); --- Run twice to test that stats-fetching functions handle replica chunks. -ANALYZE disttable; -ANALYZE disttable; - -SELECT relname, reltuples, relpages, relallvisible FROM pg_class WHERE relname IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY relname; -SELECT * FROM pg_stats WHERE tablename IN -(SELECT (_timescaledb_functions.show_chunk(show_chunks)).table_name - FROM show_chunks('disttable')) -ORDER BY 1,2,3; - --- Check underlying pg_statistics table (looking at all columns except --- starelid, which changes depending on how many tests are run before --- this) -RESET ROLE; -SELECT ch, staattnum, stainherit, stanullfrac, stawidth, stadistinct, stakind1, stakind2, stakind3, stakind4, stakind5, staop1, staop2, staop3, staop4, staop5, -stanumbers1, stanumbers2, stanumbers3, stanumbers4, stanumbers5, stavalues1, stavalues2, stavalues3, stavalues4, stavalues5 -FROM pg_statistic st, show_chunks('disttable') ch -WHERE st.starelid = ch -ORDER BY ch, staattnum; - -SELECT test.remote_exec(NULL, $$ -SELECT ch, staattnum, stainherit, stanullfrac, stawidth, stadistinct, stakind1, stakind2, stakind3, stakind4, stakind5, staop1, staop2, staop3, staop4, staop5, -stanumbers1, stanumbers2, stanumbers3, stanumbers4, stanumbers5, stavalues1, stavalues2, stavalues3, stavalues4, stavalues5 -FROM pg_statistic st, show_chunks('disttable') ch -WHERE st.starelid = ch -ORDER BY ch, staattnum; -$$); - --- Clean up -RESET ROLE; -TRUNCATE disttable; -SELECT * FROM delete_data_node(:'DATA_NODE_1', force => true); -SELECT * FROM delete_data_node(:'DATA_NODE_2', force => true); -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); - -- Test create_chunk_table to recreate the chunk table and show dimension slices SET ROLE :ROLE_DEFAULT_PERM_USER; @@ -517,10 +393,3 @@ SELECT * FROM chunkapi ORDER BY 1,2,3; SELECT * FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass); DROP TABLE chunkapi; - -\c :TEST_DBNAME :ROLE_SUPERUSER -SET client_min_messages = ERROR; -DROP TABLESPACE tablespace1; -DROP TABLESPACE tablespace2; -SET client_min_messages = NOTICE; -\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER diff --git a/tsl/test/sql/chunk_utils_internal.sql b/tsl/test/sql/chunk_utils_internal.sql index 4c90381e7d8..82458babcd3 100644 --- a/tsl/test/sql/chunk_utils_internal.sql +++ b/tsl/test/sql/chunk_utils_internal.sql @@ -439,34 +439,6 @@ WHERE hypertable_id IN (SELECT id from _timescaledb_catalog.hypertable WHERE table_name = 'ht_try') ORDER BY table_name; --- TEST error try freeze/unfreeze on dist hypertable --- Add distributed hypertables -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -\c :TEST_DBNAME :ROLE_SUPERUSER - -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2')) v(name) -) a; - -CREATE TABLE disthyper (timec timestamp, device integer); -SELECT create_distributed_hypertable('disthyper', 'timec', 'device'); -INSERT into disthyper VALUES ('2020-01-01', 10); - ---freeze one of the chunks -SELECT chunk_schema || '.' 
|| chunk_name as "CHNAME3" -FROM timescaledb_information.chunks -WHERE hypertable_name = 'disthyper' -ORDER BY chunk_name LIMIT 1 -\gset - -\set ON_ERROR_STOP 0 -SELECT _timescaledb_functions.freeze_chunk( :'CHNAME3'); -SELECT _timescaledb_functions.unfreeze_chunk( :'CHNAME3'); -\set ON_ERROR_STOP 1 - -- TEST can create OSM chunk if there are constraints on the hypertable \c :TEST_DBNAME :ROLE_4 CREATE TABLE measure( id integer PRIMARY KEY, mname varchar(10)); @@ -767,6 +739,3 @@ INSERT INTO osm_slice_update VALUES (1); -- clean up databases created \c :TEST_DBNAME :ROLE_SUPERUSER DROP DATABASE postgres_fdw_db WITH (FORCE); -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); - diff --git a/tsl/test/sql/exp_cagg_monthly.sql b/tsl/test/sql/exp_cagg_monthly.sql index 894e8e94b87..60a4ab4222d 100644 --- a/tsl/test/sql/exp_cagg_monthly.sql +++ b/tsl/test/sql/exp_cagg_monthly.sql @@ -407,108 +407,6 @@ SELECT * FROM conditions_large_1y ORDER BY bucket; RESET timescaledb.materializations_per_refresh_window; --- Test caggs with monthly buckets on top of distributed hypertable -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -\set DATA_NODE_3 :TEST_DBNAME _3 - -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) -) a; - -GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; --- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes -GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; -SET ROLE :ROLE_DEFAULT_PERM_USER; - -CREATE TABLE conditions_dist( - day DATE NOT NULL, - temperature INT NOT NULL); - -SELECT table_name FROM create_distributed_hypertable('conditions_dist', 'day', chunk_time_interval => INTERVAL '1 day'); - -INSERT INTO conditions_dist(day, temperature) -SELECT ts, date_part('month', ts)*100 + date_part('day', ts) -FROM generate_series('2010-01-01' :: date, '2010-03-01' :: date - interval '1 day', '1 day') as ts; - -CREATE MATERIALIZED VIEW conditions_dist_1m -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - timescaledb_experimental.time_bucket_ng('1 month', day) AS bucket, - MIN(temperature), - MAX(temperature) -FROM conditions_dist -GROUP BY bucket; - -SELECT mat_hypertable_id AS cagg_id -FROM _timescaledb_catalog.continuous_agg -WHERE user_view_name = 'conditions_dist_1m' -\gset - -SELECT raw_hypertable_id AS ht_id -FROM _timescaledb_catalog.continuous_agg -WHERE user_view_name = 'conditions_dist_1m' -\gset - -SELECT bucket_width -FROM _timescaledb_catalog.continuous_agg -WHERE mat_hypertable_id = :cagg_id; - -SELECT experimental, name, bucket_width, origin, timezone -FROM _timescaledb_catalog.continuous_aggs_bucket_function -WHERE mat_hypertable_id = :cagg_id; - -SELECT * FROM conditions_dist_1m ORDER BY bucket; - --- Same test but with non-realtime, NO DATA aggregate and manual refresh - -CREATE MATERIALIZED VIEW conditions_dist_1m_manual -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - timescaledb_experimental.time_bucket_ng('1 month', day) AS bucket, - MIN(temperature), - MAX(temperature) -FROM conditions_dist -GROUP BY bucket -WITH NO DATA; - -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - -CALL refresh_continuous_aggregate('conditions_dist_1m_manual', 
'2010-01-01', '2010-03-01'); -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - --- Check invalidation for caggs on top of distributed hypertable - -INSERT INTO conditions_dist(day, temperature) -VALUES ('2010-01-15', 999), ('2010-02-15', -999), ('2010-03-01', 15); - -SELECT * FROM conditions_dist_1m ORDER BY bucket; -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - -CALL refresh_continuous_aggregate('conditions_dist_1m', '2010-01-01', '2010-04-01'); -SELECT * FROM conditions_dist_1m ORDER BY bucket; -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - -CALL refresh_continuous_aggregate('conditions_dist_1m_manual', '2010-01-01', '2010-04-01'); -SELECT * FROM conditions_dist_1m ORDER BY bucket; -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - -ALTER MATERIALIZED VIEW conditions_dist_1m_manual SET ( timescaledb.compress ); -SELECT compress_chunk(ch) -FROM show_chunks('conditions_dist_1m_manual') ch limit 1; -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - --- Clean up -DROP TABLE conditions_dist CASCADE; - -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); -DROP DATABASE :DATA_NODE_3 WITH (FORCE); - -- Test the specific code path of creating a CAGG on top of empty hypertable. CREATE TABLE conditions_empty( diff --git a/tsl/test/sql/exp_cagg_origin.sql b/tsl/test/sql/exp_cagg_origin.sql index ff4593c99b1..2c1be16e3b9 100644 --- a/tsl/test/sql/exp_cagg_origin.sql +++ b/tsl/test/sql/exp_cagg_origin.sql @@ -254,107 +254,6 @@ ORDER BY month, city; -- Clean up DROP TABLE conditions CASCADE; --- Test caggs with monthly buckets and custom origin on top of distributed hypertable -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -\set DATA_NODE_3 :TEST_DBNAME _3 - -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) -) a; - -GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; --- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes -GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; -SET ROLE :ROLE_DEFAULT_PERM_USER; - -CREATE TABLE conditions_dist( - day date NOT NULL, - temperature INT NOT NULL); - -SELECT table_name FROM create_distributed_hypertable('conditions_dist', 'day', chunk_time_interval => INTERVAL '1 day'); - -INSERT INTO conditions_dist(day, temperature) -SELECT ts, date_part('month', ts)*100 + date_part('day', ts) -FROM generate_series('2010-01-01' :: date, '2010-03-01' :: date - interval '1 day', '1 day') as ts; - -CREATE MATERIALIZED VIEW conditions_dist_1m -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - timescaledb_experimental.time_bucket_ng('1 month', day, '2010-01-01') AS bucket, - MIN(temperature), - MAX(temperature) -FROM conditions_dist -GROUP BY bucket; - -SELECT mat_hypertable_id AS cagg_id, raw_hypertable_id AS ht_id -FROM _timescaledb_catalog.continuous_agg -WHERE user_view_name = 'conditions_dist_1m' -\gset - -SELECT bucket_width -FROM _timescaledb_catalog.continuous_agg -WHERE mat_hypertable_id = :cagg_id; - -SELECT experimental, name, bucket_width, origin, timezone -FROM _timescaledb_catalog.continuous_aggs_bucket_function -WHERE mat_hypertable_id = :cagg_id; - -SELECT * FROM conditions_dist_1m 
ORDER BY bucket; - --- Same test but with non-realtime, NO DATA aggregate and manual refresh - -CREATE MATERIALIZED VIEW conditions_dist_1m_manual -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - timescaledb_experimental.time_bucket_ng('1 month', day, '2005-01-01') AS bucket, - MIN(temperature), - MAX(temperature) -FROM conditions_dist -GROUP BY bucket -WITH NO DATA; - -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - -CALL refresh_continuous_aggregate('conditions_dist_1m_manual', '2010-01-01', '2010-03-01'); -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - --- Check invalidation for caggs on top of distributed hypertable - -INSERT INTO conditions_dist(day, temperature) -VALUES ('2010-01-15', 999), ('2010-02-15', -999), ('2010-03-01', 15); - -SELECT * FROM conditions_dist_1m ORDER BY bucket; -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - -CALL refresh_continuous_aggregate('conditions_dist_1m', '2010-01-01', '2010-04-01'); -SELECT * FROM conditions_dist_1m ORDER BY bucket; -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - -CALL refresh_continuous_aggregate('conditions_dist_1m_manual', '2010-01-01', '2010-04-01'); -SELECT * FROM conditions_dist_1m ORDER BY bucket; -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - --- Compression on top of distributed hypertables - -ALTER MATERIALIZED VIEW conditions_dist_1m_manual SET ( timescaledb.compress ); - -SELECT compress_chunk(ch) -FROM show_chunks('conditions_dist_1m_manual') ch limit 1; - -SELECT * FROM conditions_dist_1m_manual ORDER BY bucket; - --- Clean up -DROP TABLE conditions_dist CASCADE; - -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -SELECT delete_data_node(name) -FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v (name); -SET ROLE :ROLE_DEFAULT_PERM_USER; - -- Test the specific code path of creating a CAGG on top of empty hypertable. 
CREATE TABLE conditions_empty( @@ -658,9 +557,3 @@ SELECT add_continuous_aggregate_policy('conditions_summary_timestamptz', -- Clean up DROP TABLE conditions_timestamptz CASCADE; - -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); -DROP DATABASE :DATA_NODE_3 WITH (FORCE); - diff --git a/tsl/test/sql/exp_cagg_timezone.sql b/tsl/test/sql/exp_cagg_timezone.sql index 723d9160e90..88886c07e3b 100644 --- a/tsl/test/sql/exp_cagg_timezone.sql +++ b/tsl/test/sql/exp_cagg_timezone.sql @@ -415,140 +415,6 @@ SELECT city, to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as mont FROM conditions2_summary ORDER by month, city; --- Test caggs with monthly buckets on top of distributed hypertable -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 -\set DATA_NODE_3 :TEST_DBNAME _3 - -SELECT node_name, database, node_created, database_created, extension_created -FROM ( - SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).* - FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name) -) a; - -GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; --- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes -GRANT CREATE ON SCHEMA public TO :ROLE_DEFAULT_PERM_USER; -SET ROLE :ROLE_DEFAULT_PERM_USER; - -CREATE TABLE conditions_dist( - day timestamptz NOT NULL, - temperature INT NOT NULL); - -SELECT table_name FROM create_distributed_hypertable('conditions_dist', 'day', chunk_time_interval => INTERVAL '1 day'); - -INSERT INTO conditions_dist(day, temperature) -SELECT ts, date_part('month', ts)*100 + date_part('day', ts) -FROM generate_series('2010-01-01 00:00:00 MSK' :: timestamptz, '2010-03-01 00:00:00 MSK' :: timestamptz - interval '1 day', '1 day') as ts; - -CREATE MATERIALIZED VIEW conditions_dist_1m -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - timescaledb_experimental.time_bucket_ng('1 month', day, 'MSK') AS bucket, - MIN(temperature), - MAX(temperature) -FROM conditions_dist -GROUP BY bucket; - -SELECT mat_hypertable_id AS cagg_id -FROM _timescaledb_catalog.continuous_agg -WHERE user_view_name = 'conditions_dist_1m' -\gset - -SELECT raw_hypertable_id AS ht_id -FROM _timescaledb_catalog.continuous_agg -WHERE user_view_name = 'conditions_dist_1m' -\gset - -SELECT bucket_width -FROM _timescaledb_catalog.continuous_agg -WHERE mat_hypertable_id = :cagg_id; - -SELECT experimental, name, bucket_width, origin, timezone -FROM _timescaledb_catalog.continuous_aggs_bucket_function -WHERE mat_hypertable_id = :cagg_id; - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m -ORDER BY month; - --- Same test but with non-realtime, NO DATA aggregate and manual refresh - -CREATE MATERIALIZED VIEW conditions_dist_1m_manual -WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS -SELECT - timescaledb_experimental.time_bucket_ng('1 month', day, 'MSK') AS bucket, - MIN(temperature), - MAX(temperature) -FROM conditions_dist -GROUP BY bucket -WITH NO DATA; - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m_manual -ORDER BY month; - -CALL refresh_continuous_aggregate('conditions_dist_1m_manual', '2010-01-01 00:00:00 MSK', '2010-03-01 00:00:00 MSK'); - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') 
as month, min, max -FROM conditions_dist_1m_manual -ORDER BY month; - --- Check invalidation for caggs on top of distributed hypertable - -INSERT INTO conditions_dist(day, temperature) VALUES -('2010-01-15 00:00:00 MSK', 999), -('2010-02-15 00:00:00 MSK', -999), -('2010-03-01 00:00:00 MSK', 15); - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m -ORDER BY month; - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m_manual -ORDER BY month; - -CALL refresh_continuous_aggregate('conditions_dist_1m', '2010-01-01 00:00:00 MSK', '2010-04-01 00:00:00 MSK'); - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m -ORDER BY month; - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m_manual -ORDER BY month; - -CALL refresh_continuous_aggregate('conditions_dist_1m_manual', '2010-01-01 00:00:00 MSK', '2010-04-01 00:00:00 MSK'); - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m -ORDER BY month; - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m_manual -ORDER BY month; - --- Check compatibility with compressed distributed hypertables - -ALTER MATERIALIZED VIEW conditions_dist_1m_manual SET ( timescaledb.compress ); - -SELECT compress_chunk(ch) -FROM show_chunks('conditions_dist_1m_manual') ch limit 1; - -SELECT to_char(bucket at time zone 'MSK', 'YYYY-MM-DD HH24:MI:SS') as month, min, max -FROM conditions_dist_1m_manual -ORDER BY month; - --- Clean up -DROP TABLE conditions_dist CASCADE; - -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); -DROP DATABASE :DATA_NODE_3 WITH (FORCE); - -- Make sure add_continuous_aggregate_policy() works CREATE TABLE conditions_policy( diff --git a/tsl/test/sql/read_only.sql b/tsl/test/sql/read_only.sql index 795828d0eab..4c0eec46629 100644 --- a/tsl/test/sql/read_only.sql +++ b/tsl/test/sql/read_only.sql @@ -8,9 +8,6 @@ -- properly recognize read-only transaction state -- -\set DATA_NODE_1 :TEST_DBNAME _1 -\set DATA_NODE_2 :TEST_DBNAME _2 - -- create_hypertable() -- CREATE TABLE test_table(time bigint NOT NULL, device int); @@ -96,76 +93,6 @@ DROP TABLE test_table; SET default_transaction_read_only TO off; DROP TABLE test_table; --- data nodes --- -CREATE TABLE disttable(time timestamptz NOT NULL, device int); - --- add_data_node() --- -SET default_transaction_read_only TO on; -\set ON_ERROR_STOP 0 -SELECT * FROM add_data_node(:'DATA_NODE_1', host => 'localhost', database => :'DATA_NODE_1'); -\set ON_ERROR_STOP 1 - -SET default_transaction_read_only TO off; -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node(:'DATA_NODE_1', host => 'localhost', database => :'DATA_NODE_1'); -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node(:'DATA_NODE_2', host => 'localhost', database => :'DATA_NODE_2'); - --- create_distributed_hypertable() --- -SET default_transaction_read_only TO on; - -\set ON_ERROR_STOP 0 -SELECT * FROM create_distributed_hypertable('disttable', 'time', 'device', data_nodes => ARRAY[:'DATA_NODE_1']); -\set ON_ERROR_STOP 1 - -SET default_transaction_read_only TO off; -SELECT * FROM create_distributed_hypertable('disttable', 'time', 
'device', data_nodes => ARRAY[:'DATA_NODE_1']); - --- attach_data_node() --- -SET default_transaction_read_only TO on; -\set ON_ERROR_STOP 0 -SELECT * FROM attach_data_node(:'DATA_NODE_2', 'disttable'); -\set ON_ERROR_STOP 1 -SET default_transaction_read_only TO off; -SELECT * FROM attach_data_node(:'DATA_NODE_2', 'disttable'); - --- detach_data_node() --- -SET default_transaction_read_only TO on; -\set ON_ERROR_STOP 0 -SELECT * FROM detach_data_node(:'DATA_NODE_2', 'disttable'); -\set ON_ERROR_STOP 1 -SET default_transaction_read_only TO off; -SELECT * FROM detach_data_node(:'DATA_NODE_2', 'disttable'); - --- delete_data_node() --- -SET default_transaction_read_only TO on; -\set ON_ERROR_STOP 0 -SELECT * FROM delete_data_node(:'DATA_NODE_2'); -\set ON_ERROR_STOP 1 -SET default_transaction_read_only TO off; -SELECT * FROM delete_data_node(:'DATA_NODE_2'); - --- set_replication_factor() --- -SET default_transaction_read_only TO on; -\set ON_ERROR_STOP 0 -SELECT * FROM set_replication_factor('disttable', 2); -\set ON_ERROR_STOP 1 - --- drop distributed hypertable --- -\set ON_ERROR_STOP 0 -DROP TABLE disttable; -\set ON_ERROR_STOP 1 -SET default_transaction_read_only TO off; -DROP TABLE disttable; - -- Test some read-only cases of DDL operations -- CREATE TABLE test_table(time bigint NOT NULL, device int); @@ -286,8 +213,3 @@ SELECT remove_retention_policy('test_table'); SELECT add_job('now','12h'); SELECT alter_job(1,scheduled:=false); SELECT delete_job(1); - -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -DROP DATABASE :DATA_NODE_1 WITH (FORCE); -DROP DATABASE :DATA_NODE_2 WITH (FORCE); - diff --git a/tsl/test/sql/telemetry_stats.sql.in b/tsl/test/sql/telemetry_stats.sql similarity index 73% rename from tsl/test/sql/telemetry_stats.sql.in rename to tsl/test/sql/telemetry_stats.sql index c48cd91a485..0b7e35776dd 100644 --- a/tsl/test/sql/telemetry_stats.sql.in +++ b/tsl/test/sql/telemetry_stats.sql @@ -122,128 +122,6 @@ ANALYZE normal, hyper, part; REFRESH MATERIALIZED VIEW telemetry_report; SELECT jsonb_pretty(rels) AS relations FROM relations; --- Add distributed hypertables -\set DN_DBNAME_1 :TEST_DBNAME _1 -\set DN_DBNAME_2 :TEST_DBNAME _2 - --- Not an access node or data node -SELECT r -> 'num_data_nodes' AS num_data_nodes, - r -> 'distributed_member' AS distributed_member -FROM telemetry_report; - --- Become an access node by adding a data node -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); - --- Telemetry should show one data node and "acces node" status -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT r -> 'num_data_nodes' AS num_data_nodes, - r -> 'distributed_member' AS distributed_member -FROM telemetry_report; - --- See telemetry report from a data node -\ir include/remote_exec.sql -SELECT test.remote_exec(NULL, $$ - SELECT t -> 'num_data_nodes' AS num_data_nodes, - t -> 'distributed_member' AS distributed_member - FROM get_telemetry_report() t; -$$); - -SELECT node_name, database, node_created, database_created, extension_created -FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); -CREATE TABLE disthyper (LIKE normal); -SELECT create_distributed_hypertable('disthyper', 'time', 'device'); - --- Show distributed hypertables stats with no data -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - --- No datanode-related stats 
on the access node -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn -FROM relations; - --- Insert data into the distributed hypertable -INSERT INTO disthyper -SELECT * FROM normal; - --- Update telemetry stats and show output on access node and data --- nodes. Note that the access node doesn't store data so shows --- zero. It should have stats from ANALYZE, though, like --- num_reltuples. -ANALYZE disthyper; -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - --- Show data node stats -SELECT test.remote_exec(NULL, $$ - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t; -$$); - --- Add compression -ALTER TABLE disthyper SET (timescaledb.compress); -SELECT compress_chunk(c) -FROM show_chunks('disthyper') c ORDER BY c LIMIT 4; - -ANALYZE disthyper; --- Update telemetry stats and show updated compression stats -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - --- Show data node stats -SELECT test.remote_exec(NULL, $$ - SELECT - jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn - FROM get_telemetry_report() t; -$$); - --- Create a replicated distributed hypertable and show replication stats -CREATE TABLE disthyper_repl (LIKE normal); -SELECT create_distributed_hypertable('disthyper_repl', 'time', 'device', replication_factor => 2); -INSERT INTO disthyper_repl -SELECT * FROM normal; - -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an -FROM relations; - --- Create a continuous aggregate on the distributed hypertable -CREATE MATERIALIZED VIEW distcontagg -WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - disthyper -GROUP BY hour, device; - -CREATE MATERIALIZED VIEW distcontagg_old -WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS -SELECT - time_bucket('1 hour', time) AS hour, - device, - min(time) -FROM - disthyper -GROUP BY hour, device; - -VACUUM; - -REFRESH MATERIALIZED VIEW telemetry_report; -SELECT - jsonb_pretty(rels -> 'continuous_aggregates') AS continuous_aggregates -FROM relations; - -- check telemetry for fixed schedule jobs works create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ begin @@ -353,8 +231,3 @@ SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggrega DROP VIEW relations; DROP MATERIALIZED VIEW telemetry_report; - -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -DROP DATABASE :DN_DBNAME_1 WITH (FORCE); -DROP DATABASE :DN_DBNAME_2 WITH (FORCE); - diff --git a/tsl/test/src/remote/remote_exec.c b/tsl/test/src/remote/remote_exec.c index 89a3dc2f4b1..eb1dca9ad0c 100644 --- a/tsl/test/src/remote/remote_exec.c +++ b/tsl/test/src/remote/remote_exec.c @@ -164,6 +164,18 @@ extern List *hypertable_data_node_array_to_list(ArrayType *serverarr); Datum ts_remote_exec(PG_FUNCTION_ARGS) { +#if PG16_GE + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("executing remote command is not supported"), + errdetail("Multi-node is not supported anymore on PostgreSQL >= 16."))); 
+#else + ereport(WARNING, + (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE), + errmsg("executing remote command is deprecated"), + errdetail("Multi-node is deprecated and will be removed in future releases."))); +#endif + ArrayType *data_nodes = PG_ARGISNULL(0) ? NULL : PG_GETARG_ARRAYTYPE_P(0); char *sql = TextDatumGetCString(PG_GETARG_DATUM(1)); List *data_node_list; @@ -202,6 +214,18 @@ ts_remote_exec(PG_FUNCTION_ARGS) Datum ts_remote_exec_get_result_strings(PG_FUNCTION_ARGS) { +#if PG16_GE + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("getting results from remote command execution is not supported"), + errdetail("Multi-node is not supported anymore on PostgreSQL >= 16."))); +#else + ereport(WARNING, + (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE), + errmsg("getting results from remote command execution is deprecated"), + errdetail("Multi-node is deprecated and will be removed in future releases."))); +#endif + ArrayType *data_nodes = PG_ARGISNULL(0) ? NULL : PG_GETARG_ARRAYTYPE_P(0); char *sql = TextDatumGetCString(PG_GETARG_DATUM(1)); List *data_node_list = NIL; diff --git a/tsl/test/t/CMakeLists.txt b/tsl/test/t/CMakeLists.txt index 9782cd13cf3..2131ff223b0 100644 --- a/tsl/test/t/CMakeLists.txt +++ b/tsl/test/t/CMakeLists.txt @@ -1,8 +1,21 @@ -set(PROVE_TEST_FILES 001_simple_multinode.pl 003_connections_privs.pl - 009_logrepl_decomp_marker.pl) -set(PROVE_DEBUG_TEST_FILES - 002_chunk_copy_move.pl 004_multinode_rdwr_1pc.pl 005_add_data_node.pl - 006_job_crash_log.pl 007_healthcheck.pl 008_mvcc_cagg.pl) +set(PROVE_TEST_FILES 009_logrepl_decomp_marker.pl) + +set(PROVE_DEBUG_TEST_FILES 008_mvcc_cagg.pl) + +if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") + list(APPEND PROVE_TEST_FILES 001_simple_multinode.pl 003_connections_privs.pl) +endif() + +if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16") + list( + APPEND + PROVE_DEBUG_TEST_FILES + 002_chunk_copy_move.pl + 004_multinode_rdwr_1pc.pl + 005_add_data_node.pl + 006_job_crash_log.pl + 007_healthcheck.pl) +endif() if(CMAKE_BUILD_TYPE MATCHES Debug) list(APPEND PROVE_TEST_FILES ${PROVE_DEBUG_TEST_FILES})
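
The test-suite changes above all rely on one CMake gating pattern: multi-node tests are registered only when ENABLE_MULTINODE_TESTS is set and the target PostgreSQL major version is below 16, mirroring the runtime checks guarded by PG16_GE in the C hunks. A minimal sketch of that pattern follows; the variable defaults and test file names are placeholders for illustration, not taken from the tree.

    # Sketch: gate multi-node regression tests on the PostgreSQL major version.
    # ENABLE_MULTINODE_TESTS and PG_VERSION_MAJOR are normally provided by the
    # surrounding build configuration; placeholder values are set here only so
    # the sketch is self-contained.
    set(ENABLE_MULTINODE_TESTS ON)
    set(PG_VERSION_MAJOR 15)

    set(TEST_FILES plain_test.sql) # illustrative file name

    if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16")
      # Multi-node tests exist (deprecated) only below PostgreSQL 16; on 16+
      # this branch is skipped and the tests are never scheduled.
      list(APPEND TEST_FILES dist_example_test.sql) # illustrative file name
    endif()

On PostgreSQL 16 and later the branch is simply not taken, which is why the distributed SQL tests, test templates, and multi-node TAP tests drop out of the lists in the hunks above.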